Diffstat (limited to 'tests')
-rw-r--r--tests/cloud_tests/__init__.py39
-rw-r--r--tests/cloud_tests/__main__.py71
-rw-r--r--tests/cloud_tests/args.py304
-rw-r--r--tests/cloud_tests/bddeb.py119
-rw-r--r--tests/cloud_tests/collect.py219
-rw-r--r--tests/cloud_tests/config.py165
-rw-r--r--tests/cloud_tests/manage.py74
-rw-r--r--tests/cloud_tests/platforms.yaml77
-rw-r--r--tests/cloud_tests/platforms/__init__.py43
-rw-r--r--tests/cloud_tests/platforms/azurecloud/image.py116
-rw-r--r--tests/cloud_tests/platforms/azurecloud/instance.py247
-rw-r--r--tests/cloud_tests/platforms/azurecloud/platform.py240
-rw-r--r--tests/cloud_tests/platforms/azurecloud/regions.json42
-rw-r--r--tests/cloud_tests/platforms/azurecloud/snapshot.py58
-rw-r--r--tests/cloud_tests/platforms/ec2/image.py100
-rw-r--r--tests/cloud_tests/platforms/ec2/instance.py132
-rw-r--r--tests/cloud_tests/platforms/ec2/platform.py263
-rw-r--r--tests/cloud_tests/platforms/ec2/snapshot.py66
-rw-r--r--tests/cloud_tests/platforms/images.py56
-rw-r--r--tests/cloud_tests/platforms/instances.py165
-rw-r--r--tests/cloud_tests/platforms/lxd/image.py211
-rw-r--r--tests/cloud_tests/platforms/lxd/instance.py278
-rw-r--r--tests/cloud_tests/platforms/lxd/platform.py104
-rw-r--r--tests/cloud_tests/platforms/lxd/snapshot.py53
-rw-r--r--tests/cloud_tests/platforms/nocloudkvm/image.py79
-rw-r--r--tests/cloud_tests/platforms/nocloudkvm/instance.py197
-rw-r--r--tests/cloud_tests/platforms/nocloudkvm/platform.py94
-rw-r--r--tests/cloud_tests/platforms/nocloudkvm/snapshot.py59
-rw-r--r--tests/cloud_tests/platforms/platforms.py109
-rw-r--r--tests/cloud_tests/platforms/snapshots.py44
-rw-r--r--tests/cloud_tests/releases.yaml364
-rw-r--r--tests/cloud_tests/run_funcs.py75
-rw-r--r--tests/cloud_tests/setup_image.py237
-rw-r--r--tests/cloud_tests/stage.py116
-rw-r--r--tests/cloud_tests/testcases.yaml50
-rw-r--r--tests/cloud_tests/testcases/__init__.py73
-rw-r--r--tests/cloud_tests/testcases/base.py385
-rw-r--r--tests/cloud_tests/testcases/bugs/README.md13
-rw-r--r--tests/cloud_tests/testcases/bugs/__init__.py8
-rw-r--r--tests/cloud_tests/testcases/bugs/lp1511485.py15
-rw-r--r--tests/cloud_tests/testcases/bugs/lp1511485.yaml11
-rw-r--r--tests/cloud_tests/testcases/bugs/lp1611074.yaml8
-rw-r--r--tests/cloud_tests/testcases/bugs/lp1628337.py23
-rw-r--r--tests/cloud_tests/testcases/bugs/lp1628337.yaml23
-rw-r--r--tests/cloud_tests/testcases/examples/README.md12
-rw-r--r--tests/cloud_tests/testcases/examples/TODO.md15
-rw-r--r--tests/cloud_tests/testcases/examples/__init__.py8
-rw-r--r--tests/cloud_tests/testcases/examples/add_apt_repositories.py20
-rw-r--r--tests/cloud_tests/testcases/examples/add_apt_repositories.yaml23
-rw-r--r--tests/cloud_tests/testcases/examples/alter_completion_message.py40
-rw-r--r--tests/cloud_tests/testcases/examples/alter_completion_message.yaml16
-rw-r--r--tests/cloud_tests/testcases/examples/configure_instance_trusted_ca_certificates.py27
-rw-r--r--tests/cloud_tests/testcases/examples/configure_instance_trusted_ca_certificates.yaml41
-rw-r--r--tests/cloud_tests/testcases/examples/configure_instances_ssh_keys.py31
-rw-r--r--tests/cloud_tests/testcases/examples/configure_instances_ssh_keys.yaml63
-rw-r--r--tests/cloud_tests/testcases/examples/including_user_groups.py49
-rw-r--r--tests/cloud_tests/testcases/examples/including_user_groups.yaml56
-rw-r--r--tests/cloud_tests/testcases/examples/install_arbitrary_packages.py20
-rw-r--r--tests/cloud_tests/testcases/examples/install_arbitrary_packages.yaml20
-rw-r--r--tests/cloud_tests/testcases/examples/install_run_chef_recipes.py17
-rw-r--r--tests/cloud_tests/testcases/examples/install_run_chef_recipes.yaml104
-rw-r--r--tests/cloud_tests/testcases/examples/run_apt_upgrade.py19
-rw-r--r--tests/cloud_tests/testcases/examples/run_apt_upgrade.yaml11
-rw-r--r--tests/cloud_tests/testcases/examples/run_commands.py15
-rw-r--r--tests/cloud_tests/testcases/examples/run_commands.yaml16
-rw-r--r--tests/cloud_tests/testcases/examples/run_commands_first_boot.py15
-rw-r--r--tests/cloud_tests/testcases/examples/run_commands_first_boot.yaml16
-rw-r--r--tests/cloud_tests/testcases/examples/setup_run_puppet.yaml55
-rw-r--r--tests/cloud_tests/testcases/examples/writing_out_arbitrary_files.py30
-rw-r--r--tests/cloud_tests/testcases/examples/writing_out_arbitrary_files.yaml45
-rw-r--r--tests/cloud_tests/testcases/main/README.md11
-rw-r--r--tests/cloud_tests/testcases/main/__init__.py8
-rw-r--r--tests/cloud_tests/testcases/main/command_output_simple.py21
-rw-r--r--tests/cloud_tests/testcases/main/command_output_simple.yaml13
-rw-r--r--tests/cloud_tests/testcases/modules/README.md12
-rw-r--r--tests/cloud_tests/testcases/modules/TODO.md95
-rw-r--r--tests/cloud_tests/testcases/modules/__init__.py8
-rw-r--r--tests/cloud_tests/testcases/modules/apt_configure_conf.py20
-rw-r--r--tests/cloud_tests/testcases/modules/apt_configure_conf.yaml21
-rw-r--r--tests/cloud_tests/testcases/modules/apt_configure_disable_suites.py15
-rw-r--r--tests/cloud_tests/testcases/modules/apt_configure_disable_suites.yaml20
-rw-r--r--tests/cloud_tests/testcases/modules/apt_configure_primary.py24
-rw-r--r--tests/cloud_tests/testcases/modules/apt_configure_primary.yaml19
-rw-r--r--tests/cloud_tests/testcases/modules/apt_configure_proxy.py22
-rw-r--r--tests/cloud_tests/testcases/modules/apt_configure_proxy.yaml18
-rw-r--r--tests/cloud_tests/testcases/modules/apt_configure_security.py15
-rw-r--r--tests/cloud_tests/testcases/modules/apt_configure_security.yaml18
-rw-r--r--tests/cloud_tests/testcases/modules/apt_configure_sources_key.py23
-rw-r--r--tests/cloud_tests/testcases/modules/apt_configure_sources_key.yaml50
-rw-r--r--tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.py23
-rw-r--r--tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.yaml23
-rw-r--r--tests/cloud_tests/testcases/modules/apt_configure_sources_list.py31
-rw-r--r--tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml28
-rw-r--r--tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.py23
-rw-r--r--tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.yaml29
-rw-r--r--tests/cloud_tests/testcases/modules/apt_pipelining_disable.py15
-rw-r--r--tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml14
-rw-r--r--tests/cloud_tests/testcases/modules/apt_pipelining_os.py15
-rw-r--r--tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml14
-rw-r--r--tests/cloud_tests/testcases/modules/bootcmd.py15
-rw-r--r--tests/cloud_tests/testcases/modules/bootcmd.yaml13
-rw-r--r--tests/cloud_tests/testcases/modules/byobu.py24
-rw-r--r--tests/cloud_tests/testcases/modules/byobu.yaml17
-rw-r--r--tests/cloud_tests/testcases/modules/ca_certs.py33
-rw-r--r--tests/cloud_tests/testcases/modules/ca_certs.yaml56
-rw-r--r--tests/cloud_tests/testcases/modules/debug_disable.py16
-rw-r--r--tests/cloud_tests/testcases/modules/debug_disable.yaml9
-rw-r--r--tests/cloud_tests/testcases/modules/debug_enable.py15
-rw-r--r--tests/cloud_tests/testcases/modules/debug_enable.yaml9
-rw-r--r--tests/cloud_tests/testcases/modules/final_message.py40
-rw-r--r--tests/cloud_tests/testcases/modules/final_message.yaml13
-rw-r--r--tests/cloud_tests/testcases/modules/keys_to_console.py22
-rw-r--r--tests/cloud_tests/testcases/modules/keys_to_console.yaml15
-rw-r--r--tests/cloud_tests/testcases/modules/landscape.yaml28
-rw-r--r--tests/cloud_tests/testcases/modules/locale.py30
-rw-r--r--tests/cloud_tests/testcases/modules/locale.yaml22
-rw-r--r--tests/cloud_tests/testcases/modules/lxd_bridge.py36
-rw-r--r--tests/cloud_tests/testcases/modules/lxd_bridge.yaml32
-rw-r--r--tests/cloud_tests/testcases/modules/lxd_dir.py30
-rw-r--r--tests/cloud_tests/testcases/modules/lxd_dir.yaml19
-rw-r--r--tests/cloud_tests/testcases/modules/ntp.py24
-rw-r--r--tests/cloud_tests/testcases/modules/ntp.yaml22
-rw-r--r--tests/cloud_tests/testcases/modules/ntp_chrony.py26
-rw-r--r--tests/cloud_tests/testcases/modules/ntp_chrony.yaml17
-rw-r--r--tests/cloud_tests/testcases/modules/ntp_pools.py34
-rw-r--r--tests/cloud_tests/testcases/modules/ntp_pools.yaml32
-rw-r--r--tests/cloud_tests/testcases/modules/ntp_servers.py34
-rw-r--r--tests/cloud_tests/testcases/modules/ntp_servers.yaml28
-rw-r--r--tests/cloud_tests/testcases/modules/ntp_timesyncd.py15
-rw-r--r--tests/cloud_tests/testcases/modules/ntp_timesyncd.yaml15
-rw-r--r--tests/cloud_tests/testcases/modules/package_update_upgrade_install.py36
-rw-r--r--tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml30
-rw-r--r--tests/cloud_tests/testcases/modules/runcmd.py15
-rw-r--r--tests/cloud_tests/testcases/modules/runcmd.yaml13
-rw-r--r--tests/cloud_tests/testcases/modules/seed_random_command.yaml18
-rw-r--r--tests/cloud_tests/testcases/modules/seed_random_data.py15
-rw-r--r--tests/cloud_tests/testcases/modules/seed_random_data.yaml15
-rw-r--r--tests/cloud_tests/testcases/modules/set_hostname.py17
-rw-r--r--tests/cloud_tests/testcases/modules/set_hostname.yaml21
-rw-r--r--tests/cloud_tests/testcases/modules/set_hostname_fqdn.py31
-rw-r--r--tests/cloud_tests/testcases/modules/set_hostname_fqdn.yaml23
-rw-r--r--tests/cloud_tests/testcases/modules/set_password.py22
-rw-r--r--tests/cloud_tests/testcases/modules/set_password.yaml19
-rw-r--r--tests/cloud_tests/testcases/modules/set_password_expire.py23
-rw-r--r--tests/cloud_tests/testcases/modules/set_password_expire.yaml32
-rw-r--r--tests/cloud_tests/testcases/modules/set_password_list.py12
-rw-r--r--tests/cloud_tests/testcases/modules/set_password_list.yaml41
-rw-r--r--tests/cloud_tests/testcases/modules/set_password_list_string.py12
-rw-r--r--tests/cloud_tests/testcases/modules/set_password_list_string.yaml41
-rw-r--r--tests/cloud_tests/testcases/modules/snap.py16
-rw-r--r--tests/cloud_tests/testcases/modules/snap.yaml21
-rw-r--r--tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.py16
-rw-r--r--tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.yaml14
-rw-r--r--tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_enable.py18
-rw-r--r--tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_enable.yaml21
-rw-r--r--tests/cloud_tests/testcases/modules/ssh_import_id.py17
-rw-r--r--tests/cloud_tests/testcases/modules/ssh_import_id.yaml17
-rw-r--r--tests/cloud_tests/testcases/modules/ssh_keys_generate.py52
-rw-r--r--tests/cloud_tests/testcases/modules/ssh_keys_generate.yaml38
-rw-r--r--tests/cloud_tests/testcases/modules/ssh_keys_provided.py58
-rw-r--r--tests/cloud_tests/testcases/modules/ssh_keys_provided.yaml99
-rw-r--r--tests/cloud_tests/testcases/modules/timezone.py15
-rw-r--r--tests/cloud_tests/testcases/modules/timezone.yaml16
-rw-r--r--tests/cloud_tests/testcases/modules/user_groups.py49
-rw-r--r--tests/cloud_tests/testcases/modules/user_groups.yaml55
-rw-r--r--tests/cloud_tests/testcases/modules/write_files.py33
-rw-r--r--tests/cloud_tests/testcases/modules/write_files.yaml53
-rw-r--r--tests/cloud_tests/util.py532
-rw-r--r--tests/cloud_tests/verify.py149
-rw-r--r--tests/configs/sample1.yaml49
-rw-r--r--tests/data/netinfo/sample-ipaddrshow-json91
-rw-r--r--tests/data/netinfo/sample-ipaddrshow-json-down57
-rw-r--r--tests/data/netinfo/sample-ipaddrshow-output3
-rw-r--r--tests/data/old_pickles/trusty-14.04.1-0.7.5.pkl504
-rw-r--r--tests/integration_tests/__init__.py14
-rw-r--r--tests/integration_tests/assets/keys/id_rsa.test138
-rw-r--r--tests/integration_tests/assets/keys/id_rsa.test1.pub1
-rw-r--r--tests/integration_tests/assets/keys/id_rsa.test238
-rw-r--r--tests/integration_tests/assets/keys/id_rsa.test2.pub1
-rw-r--r--tests/integration_tests/assets/keys/id_rsa.test338
-rw-r--r--tests/integration_tests/assets/keys/id_rsa.test3.pub1
-rw-r--r--tests/integration_tests/assets/test_version_change.pklbin0 -> 21 bytes
-rw-r--r--tests/integration_tests/assets/trusty_with_mime.pkl572
-rw-r--r--tests/integration_tests/bugs/test_gh570.py39
-rw-r--r--tests/integration_tests/bugs/test_gh626.py43
-rw-r--r--tests/integration_tests/bugs/test_gh632.py33
-rw-r--r--tests/integration_tests/bugs/test_gh668.py46
-rw-r--r--tests/integration_tests/bugs/test_gh671.py53
-rw-r--r--tests/integration_tests/bugs/test_gh868.py27
-rw-r--r--tests/integration_tests/bugs/test_lp1813396.py31
-rw-r--r--tests/integration_tests/bugs/test_lp1835584.py101
-rw-r--r--tests/integration_tests/bugs/test_lp1886531.py4
-rw-r--r--tests/integration_tests/bugs/test_lp1897099.py14
-rw-r--r--tests/integration_tests/bugs/test_lp1898997.py77
-rw-r--r--tests/integration_tests/bugs/test_lp1900837.py5
-rw-r--r--tests/integration_tests/bugs/test_lp1901011.py67
-rw-r--r--tests/integration_tests/bugs/test_lp1910835.py64
-rw-r--r--tests/integration_tests/bugs/test_lp1912844.py105
-rw-r--r--tests/integration_tests/clouds.py331
-rw-r--r--tests/integration_tests/conftest.py273
-rw-r--r--tests/integration_tests/datasources/test_lxd_discovery.py90
-rw-r--r--tests/integration_tests/datasources/test_network_dependency.py33
-rw-r--r--tests/integration_tests/instances.py207
-rw-r--r--tests/integration_tests/integration_settings.py61
-rw-r--r--tests/integration_tests/modules/test_apt.py354
-rw-r--r--tests/integration_tests/modules/test_apt_configure_sources_list.py51
-rw-r--r--tests/integration_tests/modules/test_ca_certs.py90
-rw-r--r--tests/integration_tests/modules/test_cli.py81
-rw-r--r--tests/integration_tests/modules/test_combined.py342
-rw-r--r--tests/integration_tests/modules/test_command_output.py21
-rw-r--r--tests/integration_tests/modules/test_disk_setup.py212
-rw-r--r--tests/integration_tests/modules/test_growpart.py68
-rw-r--r--tests/integration_tests/modules/test_hotplug.py112
-rw-r--r--tests/integration_tests/modules/test_jinja_templating.py33
-rw-r--r--tests/integration_tests/modules/test_keyboard.py17
-rw-r--r--tests/integration_tests/modules/test_keys_to_console.py113
-rw-r--r--tests/integration_tests/modules/test_lxd_bridge.py46
-rw-r--r--tests/integration_tests/modules/test_ntp_servers.py98
-rw-r--r--tests/integration_tests/modules/test_package_update_upgrade_install.py19
-rw-r--r--tests/integration_tests/modules/test_persistence.py32
-rw-r--r--tests/integration_tests/modules/test_power_state_change.py97
-rw-r--r--tests/integration_tests/modules/test_puppet.py39
-rw-r--r--tests/integration_tests/modules/test_runcmd.py25
-rw-r--r--tests/integration_tests/modules/test_seed_random_data.py28
-rw-r--r--tests/integration_tests/modules/test_set_hostname.py27
-rw-r--r--tests/integration_tests/modules/test_set_password.py57
-rw-r--r--tests/integration_tests/modules/test_snap.py29
-rw-r--r--tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py18
-rw-r--r--tests/integration_tests/modules/test_ssh_generate.py16
-rw-r--r--tests/integration_tests/modules/test_ssh_import_id.py29
-rw-r--r--tests/integration_tests/modules/test_ssh_keys_provided.py122
-rw-r--r--tests/integration_tests/modules/test_ssh_keysfile.py224
-rw-r--r--tests/integration_tests/modules/test_timezone.py25
-rw-r--r--tests/integration_tests/modules/test_user_events.py110
-rw-r--r--tests/integration_tests/modules/test_users_groups.py50
-rw-r--r--tests/integration_tests/modules/test_version_change.py76
-rw-r--r--tests/integration_tests/modules/test_write_files.py47
-rw-r--r--tests/integration_tests/network/test_net_config_load.py27
-rw-r--r--tests/integration_tests/test_logging.py22
-rw-r--r--tests/integration_tests/test_shell_script_by_frequency.py48
-rw-r--r--tests/integration_tests/test_upgrade.py188
-rw-r--r--tests/integration_tests/util.py142
-rw-r--r--tests/unittests/__init__.py1
-rw-r--r--tests/unittests/analyze/test_boot.py174
-rw-r--r--tests/unittests/analyze/test_dump.py247
-rw-r--r--tests/unittests/cloudinit/__init__py (renamed from tests/cloud_tests/platforms/azurecloud/__init__.py)0
-rw-r--r--tests/unittests/cmd/__init__.py (renamed from tests/cloud_tests/platforms/ec2/__init__.py)0
-rw-r--r--tests/unittests/cmd/devel/__init__.py (renamed from tests/cloud_tests/platforms/lxd/__init__.py)0
-rw-r--r--tests/unittests/cmd/devel/test_hotplug_hook.py236
-rw-r--r--tests/unittests/cmd/devel/test_logs.py213
-rw-r--r--tests/unittests/cmd/devel/test_render.py154
-rw-r--r--tests/unittests/cmd/test_clean.py211
-rw-r--r--tests/unittests/cmd/test_cloud_id.py187
-rw-r--r--tests/unittests/cmd/test_main.py241
-rw-r--r--tests/unittests/cmd/test_query.py537
-rw-r--r--tests/unittests/cmd/test_status.py548
-rw-r--r--tests/unittests/config/__init__.py (renamed from tests/cloud_tests/platforms/nocloudkvm/__init__.py)0
-rw-r--r--tests/unittests/config/test_apt_conf_v1.py (renamed from tests/unittests/test_handler/test_handler_apt_conf_v1.py)68
-rw-r--r--tests/unittests/config/test_apt_configure_sources_list_v1.py (renamed from tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py)152
-rw-r--r--tests/unittests/config/test_apt_configure_sources_list_v3.py (renamed from tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py)190
-rw-r--r--tests/unittests/config/test_apt_key.py124
-rw-r--r--tests/unittests/config/test_apt_source_v1.py852
-rw-r--r--tests/unittests/config/test_apt_source_v3.py1442
-rw-r--r--tests/unittests/config/test_cc_apk_configure.py410
-rw-r--r--tests/unittests/config/test_cc_apt_configure.py202
-rw-r--r--tests/unittests/config/test_cc_apt_pipelining.py65
-rw-r--r--tests/unittests/config/test_cc_bootcmd.py165
-rw-r--r--tests/unittests/config/test_cc_byobu.py51
-rw-r--r--tests/unittests/config/test_cc_ca_certs.py507
-rw-r--r--tests/unittests/config/test_cc_chef.py464
-rw-r--r--tests/unittests/config/test_cc_debug.py112
-rw-r--r--tests/unittests/config/test_cc_disable_ec2_metadata.py81
-rw-r--r--tests/unittests/config/test_cc_disk_setup.py333
-rw-r--r--tests/unittests/config/test_cc_final_message.py46
-rw-r--r--tests/unittests/config/test_cc_growpart.py (renamed from tests/unittests/test_handler/test_handler_growpart.py)223
-rw-r--r--tests/unittests/config/test_cc_grub_dpkg.py187
-rw-r--r--tests/unittests/config/test_cc_install_hotplug.py129
-rw-r--r--tests/unittests/config/test_cc_keys_to_console.py40
-rw-r--r--tests/unittests/config/test_cc_landscape.py170
-rw-r--r--tests/unittests/config/test_cc_locale.py123
-rw-r--r--tests/unittests/config/test_cc_lxd.py272
-rw-r--r--tests/unittests/config/test_cc_mcollective.py (renamed from tests/unittests/test_handler/test_handler_mcollective.py)123
-rw-r--r--tests/unittests/config/test_cc_mounts.py522
-rw-r--r--tests/unittests/config/test_cc_ntp.py (renamed from tests/unittests/test_handler/test_handler_ntp.py)762
-rw-r--r--tests/unittests/config/test_cc_power_state_change.py (renamed from tests/unittests/test_handler/test_handler_power_state.py)78
-rw-r--r--tests/unittests/config/test_cc_puppet.py450
-rw-r--r--tests/unittests/config/test_cc_refresh_rmc_and_interface.py157
-rw-r--r--tests/unittests/config/test_cc_resizefs.py490
-rw-r--r--tests/unittests/config/test_cc_resizefs_vyos.py490
-rw-r--r--tests/unittests/config/test_cc_resolv_conf.py197
-rw-r--r--tests/unittests/config/test_cc_rh_subscription.py320
-rw-r--r--tests/unittests/config/test_cc_rsyslog.py (renamed from tests/unittests/test_handler/test_handler_rsyslog.py)114
-rw-r--r--tests/unittests/config/test_cc_runcmd.py137
-rw-r--r--tests/unittests/config/test_cc_seed_random.py221
-rw-r--r--tests/unittests/config/test_cc_set_hostname.py208
-rw-r--r--tests/unittests/config/test_cc_set_passwords.py177
-rw-r--r--tests/unittests/config/test_cc_snap.py640
-rw-r--r--tests/unittests/config/test_cc_spacewalk.py (renamed from tests/unittests/test_handler/test_handler_spacewalk.py)36
-rw-r--r--tests/unittests/config/test_cc_ssh.py467
-rw-r--r--tests/unittests/config/test_cc_timezone.py (renamed from tests/unittests/test_handler/test_handler_timezone.py)49
-rw-r--r--tests/unittests/config/test_cc_ubuntu_advantage.py391
-rw-r--r--tests/unittests/config/test_cc_ubuntu_drivers.py293
-rw-r--r--tests/unittests/config/test_cc_update_etc_hosts.py68
-rw-r--r--tests/unittests/config/test_cc_users_groups.py268
-rw-r--r--tests/unittests/config/test_cc_write_files.py (renamed from tests/unittests/test_handler/test_handler_write_files.py)154
-rw-r--r--tests/unittests/config/test_cc_write_files_deferred.py85
-rw-r--r--tests/unittests/config/test_cc_yum_add_repo.py120
-rw-r--r--tests/unittests/config/test_cc_zypper_add_repo.py (renamed from tests/unittests/test_handler/test_handler_zypper_add_repo.py)170
-rw-r--r--tests/unittests/config/test_schema.py917
-rw-r--r--tests/unittests/distros/__init__.py (renamed from tests/unittests/test_distros/__init__.py)10
-rw-r--r--tests/unittests/distros/test_arch.py55
-rw-r--r--tests/unittests/distros/test_bsd_utils.py66
-rw-r--r--tests/unittests/distros/test_create_users.py282
-rw-r--r--tests/unittests/distros/test_debian.py211
-rw-r--r--tests/unittests/distros/test_dragonflybsd.py25
-rw-r--r--tests/unittests/distros/test_freebsd.py (renamed from tests/unittests/test_distros/test_freebsd.py)28
-rw-r--r--tests/unittests/distros/test_generic.py383
-rw-r--r--tests/unittests/distros/test_gentoo.py (renamed from tests/unittests/test_distros/test_gentoo.py)13
-rw-r--r--tests/unittests/distros/test_hostname.py (renamed from tests/unittests/test_distros/test_hostname.py)16
-rw-r--r--tests/unittests/distros/test_hosts.py47
-rw-r--r--tests/unittests/distros/test_init.py248
-rw-r--r--tests/unittests/distros/test_manage_service.py41
-rw-r--r--tests/unittests/distros/test_netbsd.py (renamed from tests/unittests/test_distros/test_netbsd.py)11
-rw-r--r--tests/unittests/distros/test_netconfig.py (renamed from tests/unittests/test_distros/test_netconfig.py)682
-rw-r--r--tests/unittests/distros/test_networking.py231
-rw-r--r--tests/unittests/distros/test_opensuse.py (renamed from tests/unittests/test_distros/test_opensuse.py)5
-rw-r--r--tests/unittests/distros/test_photon.py68
-rw-r--r--tests/unittests/distros/test_resolv.py (renamed from tests/unittests/test_distros/test_resolv.py)55
-rw-r--r--tests/unittests/distros/test_sles.py (renamed from tests/unittests/test_distros/test_sles.py)5
-rw-r--r--tests/unittests/distros/test_sysconfig.py (renamed from tests/unittests/test_distros/test_sysconfig.py)64
-rw-r--r--tests/unittests/distros/test_user_data_normalize.py365
-rw-r--r--tests/unittests/filters/__init__.py (renamed from tests/unittests/test_datasource/__init__.py)0
-rw-r--r--tests/unittests/filters/test_launch_index.py (renamed from tests/unittests/test_filters/test_launch_index.py)23
-rw-r--r--tests/unittests/helpers.py554
-rw-r--r--tests/unittests/net/__init__.py (renamed from tests/unittests/test_filters/__init__.py)0
-rw-r--r--tests/unittests/net/test_dhcp.py797
-rw-r--r--tests/unittests/net/test_init.py1734
-rw-r--r--tests/unittests/net/test_network_state.py222
-rw-r--r--tests/unittests/net/test_networkd.py64
-rw-r--r--tests/unittests/runs/__init__.py (renamed from tests/unittests/test_handler/__init__.py)0
-rw-r--r--tests/unittests/runs/test_merge_run.py61
-rw-r--r--tests/unittests/runs/test_simple_run.py (renamed from tests/unittests/test_runs/test_simple_run.py)134
-rw-r--r--tests/unittests/sources/__init__.py (renamed from tests/unittests/test_runs/__init__.py)0
-rw-r--r--tests/unittests/sources/helpers/test_netlink.py573
-rw-r--r--tests/unittests/sources/helpers/test_openstack.py62
-rw-r--r--tests/unittests/sources/test_aliyun.py287
-rw-r--r--tests/unittests/sources/test_altcloud.py (renamed from tests/unittests/test_datasource/test_altcloud.py)311
-rw-r--r--tests/unittests/sources/test_azure.py4306
-rw-r--r--tests/unittests/sources/test_azure_helper.py (renamed from tests/unittests/test_datasource/test_azure_helper.py)1156
-rw-r--r--tests/unittests/sources/test_cloudsigma.py (renamed from tests/unittests/test_datasource/test_cloudsigma.py)74
-rw-r--r--tests/unittests/sources/test_cloudstack.py (renamed from tests/unittests/test_datasource/test_cloudstack.py)121
-rw-r--r--tests/unittests/sources/test_common.py123
-rw-r--r--tests/unittests/sources/test_configdrive.py1068
-rw-r--r--tests/unittests/sources/test_digitalocean.py389
-rw-r--r--tests/unittests/sources/test_ec2.py (renamed from tests/unittests/test_datasource/test_ec2.py)853
-rw-r--r--tests/unittests/sources/test_exoscale.py241
-rw-r--r--tests/unittests/sources/test_gce.py416
-rw-r--r--tests/unittests/sources/test_hetzner.py (renamed from tests/unittests/test_datasource/test_hetzner.py)124
-rw-r--r--tests/unittests/sources/test_ibmcloud.py (renamed from tests/unittests/test_datasource/test_ibmcloud.py)299
-rw-r--r--tests/unittests/sources/test_init.py994
-rw-r--r--tests/unittests/sources/test_lxd.py394
-rw-r--r--tests/unittests/sources/test_maas.py (renamed from tests/unittests/test_datasource/test_maas.py)149
-rw-r--r--tests/unittests/sources/test_nocloud.py (renamed from tests/unittests/test_datasource/test_nocloud.py)320
-rw-r--r--tests/unittests/sources/test_opennebula.py (renamed from tests/unittests/test_datasource/test_opennebula.py)890
-rw-r--r--tests/unittests/sources/test_openstack.py788
-rw-r--r--tests/unittests/sources/test_oracle.py933
-rw-r--r--tests/unittests/sources/test_ovf.py1237
-rw-r--r--tests/unittests/sources/test_rbx.py241
-rw-r--r--tests/unittests/sources/test_scaleway.py526
-rw-r--r--tests/unittests/sources/test_smartos.py (renamed from tests/unittests/test_datasource/test_smartos.py)960
-rw-r--r--tests/unittests/sources/test_upcloud.py331
-rw-r--r--tests/unittests/sources/test_vmware.py389
-rw-r--r--tests/unittests/sources/test_vultr.py339
-rw-r--r--tests/unittests/sources/vmware/__init__.py (renamed from tests/unittests/test_vmware/__init__.py)0
-rw-r--r--tests/unittests/sources/vmware/test_custom_script.py (renamed from tests/unittests/test_vmware/test_custom_script.py)63
-rw-r--r--tests/unittests/sources/vmware/test_guestcust_util.py109
-rw-r--r--tests/unittests/sources/vmware/test_vmware_config_file.py635
-rw-r--r--tests/unittests/test__init__.py193
-rw-r--r--tests/unittests/test_atomic_helper.py6
-rw-r--r--tests/unittests/test_builtin_handlers.py472
-rw-r--r--tests/unittests/test_cli.py304
-rw-r--r--tests/unittests/test_conftest.py65
-rw-r--r--tests/unittests/test_cs_util.py39
-rw-r--r--tests/unittests/test_data.py537
-rw-r--r--tests/unittests/test_datasource/test_aliyun.py218
-rw-r--r--tests/unittests/test_datasource/test_azure.py2999
-rw-r--r--tests/unittests/test_datasource/test_common.py110
-rw-r--r--tests/unittests/test_datasource/test_configdrive.py837
-rw-r--r--tests/unittests/test_datasource/test_digitalocean.py372
-rw-r--r--tests/unittests/test_datasource/test_exoscale.py211
-rw-r--r--tests/unittests/test_datasource/test_gce.py363
-rw-r--r--tests/unittests/test_datasource/test_openstack.py694
-rw-r--r--tests/unittests/test_datasource/test_ovf.py544
-rw-r--r--tests/unittests/test_datasource/test_rbx.py238
-rw-r--r--tests/unittests/test_datasource/test_scaleway.py473
-rw-r--r--tests/unittests/test_dhclient_hook.py112
-rw-r--r--tests/unittests/test_distros/test_arch.py45
-rw-r--r--tests/unittests/test_distros/test_bsd_utils.py67
-rw-r--r--tests/unittests/test_distros/test_create_users.py271
-rw-r--r--tests/unittests/test_distros/test_debian.py100
-rw-r--r--tests/unittests/test_distros/test_generic.py302
-rw-r--r--tests/unittests/test_distros/test_hosts.py45
-rw-r--r--tests/unittests/test_distros/test_user_data_normalize.py374
-rw-r--r--tests/unittests/test_dmi.py168
-rw-r--r--tests/unittests/test_ds_identify.py1634
-rw-r--r--tests/unittests/test_ec2_util.py376
-rw-r--r--tests/unittests/test_event.py26
-rw-r--r--tests/unittests/test_features.py68
-rw-r--r--tests/unittests/test_gpg.py139
-rw-r--r--tests/unittests/test_handler/test_handler_apk_configure.py299
-rw-r--r--tests/unittests/test_handler/test_handler_apt_source_v1.py626
-rw-r--r--tests/unittests/test_handler/test_handler_apt_source_v3.py1134
-rw-r--r--tests/unittests/test_handler/test_handler_bootcmd.py161
-rw-r--r--tests/unittests/test_handler/test_handler_ca_certs.py298
-rw-r--r--tests/unittests/test_handler/test_handler_chef.py280
-rw-r--r--tests/unittests/test_handler/test_handler_debug.py74
-rw-r--r--tests/unittests/test_handler/test_handler_disk_setup.py243
-rw-r--r--tests/unittests/test_handler/test_handler_etc_hosts.py70
-rw-r--r--tests/unittests/test_handler/test_handler_landscape.py130
-rw-r--r--tests/unittests/test_handler/test_handler_locale.py108
-rw-r--r--tests/unittests/test_handler/test_handler_lxd.py231
-rw-r--r--tests/unittests/test_handler/test_handler_mounts.py397
-rw-r--r--tests/unittests/test_handler/test_handler_puppet.py179
-rw-r--r--tests/unittests/test_handler/test_handler_refresh_rmc_and_interface.py109
-rw-r--r--tests/unittests/test_handler/test_handler_resizefs.py398
-rw-r--r--tests/unittests/test_handler/test_handler_resizefs_vyos.py398
-rw-r--r--tests/unittests/test_handler/test_handler_runcmd.py121
-rw-r--r--tests/unittests/test_handler/test_handler_seed_random.py221
-rw-r--r--tests/unittests/test_handler/test_handler_set_hostname.py126
-rw-r--r--tests/unittests/test_handler/test_handler_yum_add_repo.py111
-rw-r--r--tests/unittests/test_handler/test_schema.py554
-rw-r--r--tests/unittests/test_helpers.py40
-rw-r--r--tests/unittests/test_log.py14
-rw-r--r--tests/unittests/test_merging.py123
-rw-r--r--tests/unittests/test_net.py5107
-rw-r--r--tests/unittests/test_net_activators.py262
-rw-r--r--tests/unittests/test_net_freebsd.py80
-rw-r--r--tests/unittests/test_netinfo.py353
-rw-r--r--tests/unittests/test_pathprefix2dict.py28
-rw-r--r--tests/unittests/test_persistence.py127
-rw-r--r--tests/unittests/test_registry.py21
-rw-r--r--tests/unittests/test_render_cloudcfg.py91
-rw-r--r--tests/unittests/test_reporting.py373
-rw-r--r--tests/unittests/test_reporting_hyperv.py193
-rw-r--r--tests/unittests/test_rh_subscription.py234
-rw-r--r--tests/unittests/test_runs/test_merge_run.py60
-rw-r--r--tests/unittests/test_simpletable.py119
-rw-r--r--tests/unittests/test_sshutil.py1199
-rw-r--r--tests/unittests/test_stages.py568
-rw-r--r--tests/unittests/test_subp.py353
-rw-r--r--tests/unittests/test_temp_utils.py135
-rw-r--r--tests/unittests/test_templating.py103
-rw-r--r--tests/unittests/test_upgrade.py52
-rw-r--r--tests/unittests/test_url_helper.py200
-rw-r--r--tests/unittests/test_util.py2034
-rw-r--r--tests/unittests/test_version.py32
-rw-r--r--tests/unittests/test_vmware/test_guestcust_util.py98
-rw-r--r--tests/unittests/test_vmware_config_file.py529
-rw-r--r--tests/unittests/util.py145
459 files changed, 57516 insertions, 32658 deletions
diff --git a/tests/cloud_tests/__init__.py b/tests/cloud_tests/__init__.py
deleted file mode 100644
index 6c632f99..00000000
--- a/tests/cloud_tests/__init__.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Main init."""
-
-import logging
-import os
-
-BASE_DIR = os.path.dirname(os.path.abspath(__file__))
-TESTCASES_DIR = os.path.join(BASE_DIR, 'testcases')
-TEST_CONF_DIR = os.path.join(BASE_DIR, 'testcases')
-TREE_BASE = os.sep.join(BASE_DIR.split(os.sep)[:-2])
-
-# This domain contains reverse lookups for hostnames that are used.
-# The primary reason is so sudo will return quickly when it attempts
-# to look up the hostname. i9n is just short for 'integration'.
-# see also bug 1730744 for why we had to do this.
-CI_DOMAIN = "i9n.cloud-init.io"
-
-
-def _initialize_logging():
- """Configure logging for cloud_tests."""
- logger = logging.getLogger(__name__)
- logger.setLevel(logging.DEBUG)
- formatter = logging.Formatter(
- '%(asctime)s - %(pathname)s:%(funcName)s:%(lineno)s '
- '[%(levelname)s]: %(message)s')
-
- console = logging.StreamHandler()
- console.setLevel(logging.DEBUG)
- console.setFormatter(formatter)
-
- logger.addHandler(console)
-
- return logger
-
-
-LOG = _initialize_logging()
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/__main__.py b/tests/cloud_tests/__main__.py
deleted file mode 100644
index 7ee29cad..00000000
--- a/tests/cloud_tests/__main__.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Main entry point."""
-
-import argparse
-import logging
-import os
-import sys
-
-from tests.cloud_tests import args, bddeb, collect, manage, run_funcs, verify
-from tests.cloud_tests import LOG
-
-
-def configure_log(args):
- """Configure logging."""
- level = logging.INFO
- if args.verbose:
- level = logging.DEBUG
- elif args.quiet:
- level = logging.WARN
- LOG.setLevel(level)
-
-
-def main():
- """Entry point for cloud test suite."""
- # configure parser
- parser = argparse.ArgumentParser(prog='cloud_tests')
- subparsers = parser.add_subparsers(dest="subcmd")
- subparsers.required = True
-
- def add_subparser(name, description, arg_sets):
- """Add arguments to subparser."""
- subparser = subparsers.add_parser(name, help=description)
- for (_args, _kwargs) in (a for arg_set in arg_sets for a in arg_set):
- subparser.add_argument(*_args, **_kwargs)
-
- # configure subparsers
- for (name, (description, arg_sets)) in args.SUBCMDS.items():
- add_subparser(name, description,
- [args.ARG_SETS[arg_set] for arg_set in arg_sets])
-
- # parse arguments
- parsed = parser.parse_args()
-
- # process arguments
- configure_log(parsed)
- (_, arg_sets) = args.SUBCMDS[parsed.subcmd]
- for normalizer in [args.NORMALIZERS[arg_set] for arg_set in arg_sets]:
- parsed = normalizer(parsed)
- if not parsed:
- return -1
-
- # run handler
- LOG.debug('running with args: %s', parsed)
- return {
- 'bddeb': bddeb.bddeb,
- 'collect': collect.collect,
- 'create': manage.create,
- 'run': run_funcs.run,
- 'tree_collect': run_funcs.tree_collect,
- 'tree_run': run_funcs.tree_run,
- 'verify': verify.verify,
- }[parsed.subcmd](parsed)
-
-
-if __name__ == "__main__":
- if os.geteuid() == 0:
- sys.exit('Do not run as root')
- sys.exit(main())
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/args.py b/tests/cloud_tests/args.py
deleted file mode 100644
index ab345491..00000000
--- a/tests/cloud_tests/args.py
+++ /dev/null
@@ -1,304 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Argparse argument setup and sanitization."""
-
-import os
-
-from tests.cloud_tests import config, util
-from tests.cloud_tests import LOG, TREE_BASE
-
-ARG_SETS = {
- 'BDDEB': (
- (('--bddeb-args',),
- {'help': 'args to pass through to bddeb',
- 'action': 'store', 'default': None, 'required': False}),
- (('--build-os',),
- {'help': 'OS to use as build system (default is xenial)',
- 'action': 'store', 'choices': config.ENABLED_DISTROS,
- 'default': 'xenial', 'required': False}),
- (('--build-platform',),
- {'help': 'platform to use for build system (default is lxd)',
- 'action': 'store', 'choices': config.ENABLED_PLATFORMS,
- 'default': 'lxd', 'required': False}),
- (('--cloud-init',),
- {'help': 'path to base of cloud-init tree', 'metavar': 'DIR',
- 'action': 'store', 'required': False, 'default': TREE_BASE}),),
- 'COLLECT': (
- (('-p', '--platform'),
- {'help': 'platform(s) to run tests on', 'metavar': 'PLATFORM',
- 'action': 'append', 'choices': config.ENABLED_PLATFORMS,
- 'default': []}),
- (('-n', '--os-name'),
- {'help': 'the name(s) of the OS(s) to test', 'metavar': 'NAME',
- 'action': 'append', 'choices': config.ENABLED_DISTROS,
- 'default': []}),
- (('-t', '--test-config'),
- {'help': 'test config file(s) to use', 'metavar': 'FILE',
- 'action': 'append', 'default': []}),
- (('--feature-override',),
- {'help': 'feature flags override(s), <flagname>=<true/false>',
- 'action': 'append', 'default': [], 'required': False}),),
- 'CREATE': (
- (('-c', '--config'),
- {'help': 'cloud-config yaml for testcase', 'metavar': 'DATA',
- 'action': 'store', 'required': False, 'default': None}),
- (('-e', '--enable'),
- {'help': 'enable testcase', 'required': False, 'default': False,
- 'action': 'store_true'}),
- (('name',),
- {'help': 'testcase name, in format "<category>/<test>"',
- 'action': 'store'}),
- (('-d', '--description'),
- {'help': 'description of testcase', 'required': False}),
- (('-f', '--force'),
- {'help': 'overwrite already existing test', 'required': False,
- 'action': 'store_true', 'default': False}),),
- 'INTERFACE': (
- (('-v', '--verbose'),
- {'help': 'verbose output', 'action': 'store_true', 'default': False}),
- (('-q', '--quiet'),
- {'help': 'quiet output', 'action': 'store_true', 'default': False}),),
- 'OUTPUT': (
- (('-d', '--data-dir'),
- {'help': 'directory to store test data in',
- 'action': 'store', 'metavar': 'DIR', 'required': False}),
- (('--preserve-instance',),
- {'help': 'do not destroy the instance under test',
- 'action': 'store_true', 'default': False, 'required': False}),
- (('--preserve-data',),
- {'help': 'do not remove collected data after successful run',
- 'action': 'store_true', 'default': False, 'required': False}),),
- 'OUTPUT_DEB': (
- (('--deb',),
- {'help': 'path to write output deb to', 'metavar': 'FILE',
- 'action': 'store', 'required': False,
- 'default': 'cloud-init_all.deb'}),),
- 'RESULT': (
- (('-r', '--result'),
- {'help': 'file to write results to',
- 'action': 'store', 'metavar': 'FILE'}),),
- 'SETUP': (
- (('--deb',),
- {'help': 'install deb', 'metavar': 'FILE', 'action': 'store'}),
- (('--rpm',),
- {'help': 'install rpm', 'metavar': 'FILE', 'action': 'store'}),
- (('--script',),
- {'help': 'script to set up image', 'metavar': 'DATA',
- 'action': 'store'}),
- (('--repo',),
- {'help': 'repo to enable (implies -u)', 'metavar': 'NAME',
- 'action': 'store'}),
- (('--ppa',),
- {'help': 'ppa to enable (implies -u)', 'metavar': 'NAME',
- 'action': 'store'}),
- (('-u', '--upgrade'),
- {'help': 'upgrade or install cloud-init from repo',
- 'action': 'store_true', 'default': False}),
- (('--upgrade-full',),
- {'help': 'do full system upgrade from repo (implies -u)',
- 'action': 'store_true', 'default': False}),),
-
-}
-
-SUBCMDS = {
- 'bddeb': ('build cloud-init deb from tree',
- ('BDDEB', 'OUTPUT_DEB', 'INTERFACE')),
- 'collect': ('collect test data',
- ('COLLECT', 'INTERFACE', 'OUTPUT', 'RESULT', 'SETUP')),
- 'create': ('create new test case', ('CREATE', 'INTERFACE')),
- 'run': ('run test suite',
- ('COLLECT', 'INTERFACE', 'RESULT', 'OUTPUT', 'SETUP')),
- 'tree_collect': ('collect using current working tree',
- ('BDDEB', 'COLLECT', 'INTERFACE', 'OUTPUT', 'RESULT')),
- 'tree_run': ('run using current working tree',
- ('BDDEB', 'COLLECT', 'INTERFACE', 'OUTPUT', 'RESULT')),
- 'verify': ('verify test data', ('INTERFACE', 'OUTPUT', 'RESULT')),
-}
-
-
-def _empty_normalizer(args):
- """Do not normalize arguments."""
- return args
-
-
-def normalize_bddeb_args(args):
- """Normalize BDDEB arguments.
-
- @param args: parsed args
- @return_value: updated args, or None if errors encountered
- """
- # make sure cloud-init dir is accessible
- if not (args.cloud_init and os.path.isdir(args.cloud_init)):
- LOG.error('invalid cloud-init tree path')
- return None
-
- return args
-
-
-def normalize_create_args(args):
- """Normalize CREATE arguments.
-
- @param args: parsed args
- @return_value: updated args, or None if errors occurred
- """
- # ensure valid name for new test
- if len(args.name.split('/')) != 2:
- LOG.error('invalid test name: %s', args.name)
- return None
- if os.path.exists(config.name_to_path(args.name)):
- msg = 'test: {} already exists'.format(args.name)
- if args.force:
- LOG.warning('%s but ignoring due to --force', msg)
- else:
- LOG.error(msg)
- return None
-
- # ensure test config valid if specified
- if isinstance(args.config, str) and len(args.config) == 0:
- LOG.error('test config cannot be empty if specified')
- return None
-
- # ensure description valid if specified
- if (isinstance(args.description, str) and
- (len(args.description) > 70 or len(args.description) == 0)):
- LOG.error('test description must be between 1 and 70 characters')
- return None
-
- return args
-
-
-def normalize_collect_args(args):
- """Normalize COLLECT arguments.
-
- @param args: parsed args
- @return_value: updated args, or None if errors occurred
- """
- # platform should default to lxd
- if len(args.platform) == 0:
- args.platform = ['lxd']
- args.platform = util.sorted_unique(args.platform)
-
- # os name should default to all enabled
- # if os name is provided ensure that all provided are supported
- if len(args.os_name) == 0:
- args.os_name = config.ENABLED_DISTROS
- else:
- supported = config.ENABLED_DISTROS
- invalid = [os_name for os_name in args.os_name
- if os_name not in supported]
- if len(invalid) != 0:
- LOG.error('invalid os name(s): %s', invalid)
- return None
- args.os_name = util.sorted_unique(args.os_name)
-
- # test configs should default to all enabled
- # if test configs are provided, ensure that all provided are valid
- if len(args.test_config) == 0:
- args.test_config = config.list_test_configs()
- else:
- valid = []
- invalid = []
- for name in args.test_config:
- if os.path.exists(name):
- valid.append(name)
- elif os.path.exists(config.name_to_path(name)):
- valid.append(config.name_to_path(name))
- else:
- invalid.append(name)
- if len(invalid) != 0:
- LOG.error('invalid test config(s): %s', invalid)
- return None
- else:
- args.test_config = valid
- args.test_config = util.sorted_unique(args.test_config)
-
- # parse feature flag overrides and ensure all are valid
- if args.feature_override:
- overrides = args.feature_override
- args.feature_override = util.parse_conf_list(
- overrides, boolean=True, valid=config.list_feature_flags())
- if not args.feature_override:
- LOG.error('invalid feature flag override(s): %s', overrides)
- return None
- else:
- args.feature_override = {}
-
- return args
-
-
-def normalize_output_args(args):
- """Normalize OUTPUT arguments.
-
- @param args: parsed args
- @return_value: updated args, or None if errors occurred
- """
- if args.data_dir:
- args.data_dir = os.path.abspath(args.data_dir)
- if not os.path.exists(args.data_dir):
- os.mkdir(args.data_dir)
-
- if not args.data_dir:
- args.data_dir = None
-
- # ensure clean output dir if collect
- # ensure data exists if verify
- if args.subcmd == 'collect':
- if not util.is_clean_writable_dir(args.data_dir):
- LOG.error('data_dir must be empty/new and must be writable')
- return None
-
- return args
-
-
-def normalize_output_deb_args(args):
- """Normalize OUTPUT_DEB arguments.
-
- @param args: parsed args
- @return_value: updated args, or None if errors occurred
- """
- # make sure to use abspath for deb
- args.deb = os.path.abspath(args.deb)
-
- if not args.deb.endswith('.deb'):
- LOG.error('output filename does not end in ".deb"')
- return None
-
- return args
-
-
-def normalize_setup_args(args):
- """Normalize SETUP arguments.
-
- @param args: parsed args
- @return_value: updated_args, or None if errors occurred
- """
- # ensure deb or rpm valid if specified
- for pkg in (args.deb, args.rpm):
- if pkg is not None and not os.path.exists(pkg):
- LOG.error('cannot find package: %s', pkg)
- return None
-
- # if repo or ppa to be enabled run upgrade
- if args.repo or args.ppa:
- args.upgrade = True
-
- # if ppa is specified, remove leading 'ppa:' if any
- _ppa_header = 'ppa:'
- if args.ppa and args.ppa.startswith(_ppa_header):
- args.ppa = args.ppa[len(_ppa_header):]
-
- return args
-
-
-NORMALIZERS = {
- 'BDDEB': normalize_bddeb_args,
- 'COLLECT': normalize_collect_args,
- 'CREATE': normalize_create_args,
- 'INTERFACE': _empty_normalizer,
- 'OUTPUT': normalize_output_args,
- 'OUTPUT_DEB': normalize_output_deb_args,
- 'RESULT': _empty_normalizer,
- 'SETUP': normalize_setup_args,
-}
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/bddeb.py b/tests/cloud_tests/bddeb.py
deleted file mode 100644
index e45ad947..00000000
--- a/tests/cloud_tests/bddeb.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Used to build a deb."""
-
-from functools import partial
-import os
-import tempfile
-
-from cloudinit import subp
-from tests.cloud_tests import (config, LOG)
-from tests.cloud_tests import platforms
-from tests.cloud_tests.stage import (PlatformComponent, run_stage, run_single)
-
-pre_reqs = ['devscripts', 'equivs', 'git', 'tar']
-
-
-def _out(cmd_res):
- """Get clean output from cmd result."""
- return cmd_res[0].decode("utf-8").strip()
-
-
-def build_deb(args, instance):
- """Build deb on system and copy out to location at args.deb.
-
- @param args: cmdline arguments
- @return_value: tuple of results and fail count
- """
- # update remote system package list and install build deps
- LOG.debug('installing pre-reqs')
- pkgs = ' '.join(pre_reqs)
- instance.execute('apt-get update && apt-get install --yes {}'.format(pkgs))
-
- # local tmpfile that must be deleted
- local_tarball = tempfile.NamedTemporaryFile().name
-
- # paths to use in remote system
- output_link = '/root/cloud-init_all.deb'
- remote_tarball = _out(instance.execute(['mktemp']))
- extract_dir = '/root'
- bddeb_path = os.path.join(extract_dir, 'packages', 'bddeb')
- git_env = {'GIT_DIR': os.path.join(extract_dir, '.git'),
- 'GIT_WORK_TREE': extract_dir}
-
- LOG.debug('creating tarball of cloud-init at: %s', local_tarball)
- subp.subp(['tar', 'cf', local_tarball, '--owner', 'root',
- '--group', 'root', '-C', args.cloud_init, '.'])
- LOG.debug('copying to remote system at: %s', remote_tarball)
- instance.push_file(local_tarball, remote_tarball)
-
- LOG.debug('extracting tarball in remote system at: %s', extract_dir)
- instance.execute(['tar', 'xf', remote_tarball, '-C', extract_dir])
- instance.execute(['git', 'commit', '-a', '-m', 'tmp', '--allow-empty'],
- env=git_env)
-
- LOG.debug('installing deps')
- deps_path = os.path.join(extract_dir, 'tools', 'read-dependencies')
- instance.execute([deps_path, '--install', '--test-distro',
- '--distro', 'ubuntu'])
-
- LOG.debug('building deb in remote system at: %s', output_link)
- bddeb_args = args.bddeb_args.split() if args.bddeb_args else []
- instance.execute([bddeb_path, '-d'] + bddeb_args, env=git_env)
-
- # copy the deb back to the host system
- LOG.debug('copying built deb to host at: %s', args.deb)
- instance.pull_file(output_link, args.deb)
-
-
-def setup_build(args):
- """Set build system up then run build.
-
- @param args: cmdline arguments
- @return_value: tuple of results and fail count
- """
- res = ({}, 1)
-
- # set up platform
- LOG.info('setting up platform: %s', args.build_platform)
- platform_config = config.load_platform_config(args.build_platform)
- platform_call = partial(platforms.get_platform, args.build_platform,
- platform_config)
- with PlatformComponent(platform_call) as platform:
-
- # set up image
- LOG.info('acquiring image for os: %s', args.build_os)
- img_conf = config.load_os_config(platform.platform_name, args.build_os)
- image_call = partial(platforms.get_image, platform, img_conf)
- with PlatformComponent(image_call) as image:
-
- # set up snapshot
- snapshot_call = partial(platforms.get_snapshot, image)
- with PlatformComponent(snapshot_call) as snapshot:
-
- # create instance with cloud-config to set it up
- LOG.info('creating instance to build deb in')
- empty_cloud_config = "#cloud-config\n{}"
- instance_call = partial(
- platforms.get_instance, snapshot, empty_cloud_config,
- use_desc='build cloud-init deb')
- with PlatformComponent(instance_call) as instance:
-
- # build the deb
- res = run_single('build deb on system',
- partial(build_deb, args, instance))
-
- return res
-
-
-def bddeb(args):
- """Entry point for build deb.
-
- @param args: cmdline arguments
- @return_value: fail count
- """
- LOG.info('preparing to build cloud-init deb')
- _res, failed = run_stage('build deb', [partial(setup_build, args)])
- return failed
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/collect.py b/tests/cloud_tests/collect.py
deleted file mode 100644
index 642745d8..00000000
--- a/tests/cloud_tests/collect.py
+++ /dev/null
@@ -1,219 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Used to collect data from platforms during tests."""
-
-from functools import partial
-import os
-
-from cloudinit import util as c_util
-from tests.cloud_tests import (config, LOG, setup_image, util)
-from tests.cloud_tests.stage import (PlatformComponent, run_stage, run_single)
-from tests.cloud_tests import platforms
-from tests.cloud_tests.testcases import base, get_test_class
-
-
-def collect_script(instance, base_dir, script, script_name):
- """Collect script data.
-
- @param instance: instance to run script on
- @param base_dir: base directory for output data
- @param script: script contents
- @param script_name: name of script to run
- @return_value: None, may raise errors
- """
- LOG.debug('running collect script: %s', script_name)
- (out, err, exit) = instance.run_script(
- script.encode(), rcs=False,
- description='collect: {}'.format(script_name))
- if err:
- LOG.debug("collect script %s exited '%s' and had stderr: %s",
- script_name, err, exit)
- if not isinstance(out, bytes):
- raise util.PlatformError(
- "Collection of '%s' returned type %s, expected bytes: %s" %
- (script_name, type(out), out))
-
- c_util.write_file(os.path.join(base_dir, script_name), out)
-
-
-def collect_console(instance, base_dir):
- """Collect instance console log.
-
- @param instance: instance to get console log for
- @param base_dir: directory to write console log to
- """
- logfile = os.path.join(base_dir, 'console.log')
- LOG.debug('getting console log for %s to %s', instance.name, logfile)
- try:
- data = instance.console_log()
- except NotImplementedError as e:
- # args[0] is hacky, but that's all I see to get at the message.
- data = b'NotImplementedError:' + e.args[0].encode()
- with open(logfile, "wb") as fp:
- fp.write(data)
-
-
-def collect_test_data(args, snapshot, os_name, test_name):
- """Collect data for test case.
-
- @param args: cmdline arguments
- @param snapshot: instantiated snapshot
- @param test_name: name or path of test to run
- @return_value: tuple of results and fail count
- """
- res = ({}, 1)
-
- # load test config
- test_name_in = test_name
- test_name = config.path_to_name(test_name)
- test_config = config.load_test_config(test_name)
- user_data = test_config['cloud_config']
- test_scripts = test_config['collect_scripts']
- test_output_dir = os.sep.join(
- (args.data_dir, snapshot.platform_name, os_name, test_name))
-
- # if test is not enabled, skip and return 0 failures
- if not test_config.get('enabled', False):
- LOG.warning('test config %s is not enabled, skipping', test_name)
- return ({}, 0)
-
- test_class = get_test_class(
- config.name_to_module(test_name_in),
- test_data={'platform': snapshot.platform_name, 'os_name': os_name},
- test_conf=test_config['cloud_config'])
- try:
- test_class.maybeSkipTest()
- except base.SkipTest as s:
- LOG.warning('skipping test config %s: %s', test_name, s)
- return ({}, 0)
-
- # if testcase requires a feature flag that the image does not support,
- # skip the testcase with a warning
- req_features = test_config.get('required_features', [])
- if any(feature not in snapshot.features for feature in req_features):
- LOG.warning('test config %s requires features not supported by image, '
- 'skipping.\nrequired features: %s\nsupported features: %s',
- test_name, req_features, snapshot.features)
- return ({}, 0)
-
- # if there are user data overrides required for this test case, apply them
- overrides = snapshot.config.get('user_data_overrides', {})
- if overrides:
- LOG.debug('updating user data for collect with: %s', overrides)
- user_data = util.update_user_data(user_data, overrides)
-
- # create test instance
- component = PlatformComponent(
- partial(platforms.get_instance, snapshot, user_data,
- block=True, start=False, use_desc=test_name),
- preserve_instance=args.preserve_instance)
-
- LOG.info('collecting test data for test: %s', test_name)
- with component as instance:
- start_call = partial(run_single, 'boot instance', partial(
- instance.start, wait=True, wait_for_cloud_init=True))
- collect_calls = [partial(run_single, 'script {}'.format(script_name),
- partial(collect_script, instance,
- test_output_dir, script, script_name))
- for script_name, script in test_scripts.items()]
-
- res = run_stage('collect for test: {}'.format(test_name),
- [start_call] + collect_calls)
-
- instance.shutdown()
- collect_console(instance, test_output_dir)
-
- return res
-
-
-def collect_snapshot(args, image, os_name):
- """Collect data for snapshot of image.
-
- @param args: cmdline arguments
- @param image: instantiated image with set up complete
- @return_value tuple of results and fail count
- """
- res = ({}, 1)
-
- component = PlatformComponent(partial(platforms.get_snapshot, image))
-
- LOG.debug('creating snapshot for %s', os_name)
- with component as snapshot:
- LOG.info('collecting test data for os: %s', os_name)
- res = run_stage(
- 'collect test data for {}'.format(os_name),
- [partial(collect_test_data, args, snapshot, os_name, test_name)
- for test_name in args.test_config])
-
- return res
-
-
-def collect_image(args, platform, os_name):
- """Collect data for image.
-
- @param args: cmdline arguments
- @param platform: instantiated platform
- @param os_name: name of distro to collect for
- @return_value: tuple of results and fail count
- """
- res = ({}, 1)
-
- os_config = config.load_os_config(
- platform.platform_name, os_name, require_enabled=True,
- feature_overrides=args.feature_override)
- LOG.debug('os config: %s', os_config)
- component = PlatformComponent(
- partial(platforms.get_image, platform, os_config))
-
- LOG.info('acquiring image for os: %s', os_name)
- with component as image:
- res = run_stage('set up and collect data for os: {}'.format(os_name),
- [partial(setup_image.setup_image, args, image)] +
- [partial(collect_snapshot, args, image, os_name)],
- continue_after_error=False)
-
- return res
-
-
-def collect_platform(args, platform_name):
- """Collect data for platform.
-
- @param args: cmdline arguments
- @param platform_name: platform to collect for
- @return_value: tuple of results and fail count
- """
- res = ({}, 1)
-
- platform_config = config.load_platform_config(
- platform_name, require_enabled=True)
- platform_config['data_dir'] = args.data_dir
- LOG.debug('platform config: %s', platform_config)
- component = PlatformComponent(
- partial(platforms.get_platform, platform_name, platform_config))
-
- LOG.info('setting up platform: %s', platform_name)
- with component as platform:
- res = run_stage('collect for platform: {}'.format(platform_name),
- [partial(collect_image, args, platform, os_name)
- for os_name in args.os_name])
-
- return res
-
-
-def collect(args):
- """Entry point for collection.
-
- @param args: cmdline arguments
- @return_value: fail count
- """
- (res, failed) = run_stage(
- 'collect data', [partial(collect_platform, args, platform_name)
- for platform_name in args.platform])
-
- LOG.debug('collect stages: %s', res)
- if args.result:
- util.merge_results({'collect_stages': res}, args.result)
-
- return failed
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/config.py b/tests/cloud_tests/config.py
deleted file mode 100644
index 06536edc..00000000
--- a/tests/cloud_tests/config.py
+++ /dev/null
@@ -1,165 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Used to setup test configuration."""
-
-import glob
-import os
-
-from cloudinit import util as c_util
-from tests.cloud_tests import (BASE_DIR, TEST_CONF_DIR)
-
-# conf files
-CONF_EXT = '.yaml'
-VERIFY_EXT = '.py'
-PLATFORM_CONF = os.path.join(BASE_DIR, 'platforms.yaml')
-RELEASES_CONF = os.path.join(BASE_DIR, 'releases.yaml')
-TESTCASE_CONF = os.path.join(BASE_DIR, 'testcases.yaml')
-
-
-def get(base, key):
- """Get config entry 'key' from base, ensuring is dictionary."""
- return base[key] if key in base and base[key] is not None else {}
-
-
-def enabled(config):
- """Test if config item is enabled."""
- return isinstance(config, dict) and config.get('enabled', False)
-
-
-def path_to_name(path):
- """Convert abs or rel path to test config to path under 'sconfigs/'."""
- dir_path, file_name = os.path.split(os.path.normpath(path))
- name = os.path.splitext(file_name)[0]
- return os.sep.join((os.path.basename(dir_path), name))
-
-
-def name_to_path(name):
- """Convert test config path under configs/ to full config path."""
- name = os.path.normpath(name)
- if not name.endswith(CONF_EXT):
- name = name + CONF_EXT
- return name if os.path.isabs(name) else os.path.join(TEST_CONF_DIR, name)
-
-
-def name_sanitize(name):
- """Sanitize test name to be used as a module name."""
- return name.replace('-', '_')
-
-
-def name_to_module(name):
- """Convert test name to a loadable module name under 'testcases/'."""
- name = name_sanitize(path_to_name(name))
- return name.replace(os.path.sep, '.')
-
-
-def merge_config(base, override):
- """Merge config and base."""
- res = base.copy()
- res.update(override)
- res.update({k: merge_config(base.get(k, {}), v)
- for k, v in override.items() if isinstance(v, dict)})
- return res
-
-
-def merge_feature_groups(feature_conf, feature_groups, overrides):
- """Combine feature groups and overrides to construct a supported list.
-
- @param feature_conf: feature config from releases.yaml
- @param feature_groups: feature groups the release is a member of
- @param overrides: overrides specified by the release's config
- @return_value: dict of {feature: true/false} settings
- """
- res = dict().fromkeys(feature_conf['all'])
- for group in feature_groups:
- res.update(feature_conf['groups'][group])
- res.update(overrides)
- return res
-
-
-def load_platform_config(platform_name, require_enabled=False):
- """Load configuration for platform.
-
- @param platform_name: name of platform to retrieve config for
- @param require_enabled: if true, raise error if 'enabled' not True
- @return_value: config dict
- """
- main_conf = c_util.read_conf(PLATFORM_CONF)
- conf = merge_config(main_conf['default_platform_config'],
- main_conf['platforms'][platform_name])
- if require_enabled and not enabled(conf):
- raise ValueError('Platform is not enabled')
- return conf
-
-
-def load_os_config(platform_name, os_name, require_enabled=False,
- feature_overrides=None):
- """Load configuration for os.
-
- @param platform_name: platform name to load os config for
- @param os_name: name of os to retrieve config for
- @param require_enabled: if true, raise error if 'enabled' not True
- @param feature_overrides: feature flag overrides to merge with features
- @return_value: config dict
- """
- if feature_overrides is None:
- feature_overrides = {}
- main_conf = c_util.read_conf(RELEASES_CONF)
- default = main_conf['default_release_config']
- image = main_conf['releases'][os_name]
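-    # merge precedence (lowest to highest): global default, global
-    # per-platform, release default, release per-platform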
- conf = merge_config(merge_config(get(default, 'default'),
- get(default, platform_name)),
- merge_config(get(image, 'default'),
- get(image, platform_name)))
-
- feature_conf = main_conf['features']
- feature_groups = conf.get('feature_groups', [])
- overrides = merge_config(get(conf, 'features'), feature_overrides)
- conf['arch'] = c_util.get_dpkg_architecture()
- conf['features'] = merge_feature_groups(
- feature_conf, feature_groups, overrides)
-
- if require_enabled and not enabled(conf):
- raise ValueError('OS is not enabled')
- return conf
-
-
-def load_test_config(path):
- """Load a test config file by either abs path or rel path."""
- return merge_config(c_util.read_conf(TESTCASE_CONF)['base_test_data'],
- c_util.read_conf(name_to_path(path)))
-
-
-def list_feature_flags():
- """List all supported feature flags."""
- feature_conf = get(c_util.read_conf(RELEASES_CONF), 'features')
- return feature_conf.get('all', [])
-
-
-def list_enabled_platforms():
- """List all platforms enabled for testing."""
- platforms = get(c_util.read_conf(PLATFORM_CONF), 'platforms')
- return [k for k, v in platforms.items() if enabled(v)]
-
-
-def list_enabled_distros(platforms):
- """List all distros enabled for testing on specified platforms."""
- def platform_has_enabled(config):
- """List if platform is enabled."""
- return any(enabled(merge_config(get(config, 'default'),
- get(config, platform)))
- for platform in platforms)
-
- releases = get(c_util.read_conf(RELEASES_CONF), 'releases')
- return [k for k, v in releases.items() if platform_has_enabled(v)]
-
-
-def list_test_configs():
- """List all available test config files by abspath."""
- return [os.path.abspath(f) for f in
- glob.glob(os.sep.join((TEST_CONF_DIR, '*', '*.yaml')))]
-
-
-ENABLED_PLATFORMS = sorted(list_enabled_platforms())
-ENABLED_DISTROS = sorted(list_enabled_distros(ENABLED_PLATFORMS))
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/manage.py b/tests/cloud_tests/manage.py
deleted file mode 100644
index 5f0cfd23..00000000
--- a/tests/cloud_tests/manage.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Create test cases automatically given a user_data script."""
-
-import os
-import textwrap
-
-from cloudinit import util as c_util
-from tests.cloud_tests.config import VERIFY_EXT
-from tests.cloud_tests import (config, util)
-from tests.cloud_tests import TESTCASES_DIR
-
-
-_verifier_fmt = textwrap.dedent(
- """
- \"\"\"cloud-init Integration Test Verify Script\"\"\"
- from tests.cloud_tests.testcases import base
-
-
- class {test_class}(base.CloudTestCase):
- \"\"\"
- Name: {test_name}
- Category: {test_category}
- Description: {test_description}
- \"\"\"
- pass
- """
-).lstrip()
-_config_fmt = textwrap.dedent(
- """
- #
- # Name: {test_name}
- # Category: {test_category}
- # Description: {test_description}
- #
- {config}
- """
-).strip()
-
-
-def write_testcase_config(args, fmt_args, testcase_file):
- """Write the testcase config file."""
- testcase_config = {'enabled': args.enable, 'collect_scripts': {}}
- if args.config:
- testcase_config['cloud_config'] = args.config
- fmt_args['config'] = util.yaml_format(testcase_config)
- c_util.write_file(testcase_file, _config_fmt.format(**fmt_args), omode='w')
-
-
-def write_verifier(args, fmt_args, verifier_file):
- """Write the verifier script."""
- fmt_args['test_class'] = 'Test{}'.format(
- config.name_sanitize(fmt_args['test_name']).title())
- c_util.write_file(verifier_file,
- _verifier_fmt.format(**fmt_args), omode='w')
-
-
-def create(args):
- """Create a new testcase."""
- (test_category, test_name) = args.name.split('/')
- fmt_args = {'test_name': test_name, 'test_category': test_category,
- 'test_description': str(args.description)}
-
- testcase_file = config.name_to_path(args.name)
- verifier_file = os.path.join(
- TESTCASES_DIR, test_category,
- config.name_sanitize(test_name) + VERIFY_EXT)
-
- write_testcase_config(args, fmt_args, testcase_file)
- write_verifier(args, fmt_args, verifier_file)
-
- return 0
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms.yaml b/tests/cloud_tests/platforms.yaml
deleted file mode 100644
index eaaa0a71..00000000
--- a/tests/cloud_tests/platforms.yaml
+++ /dev/null
@@ -1,77 +0,0 @@
-# ============================= Platform Config ===============================
-default_platform_config:
- # all disabled by default
- enabled: false
- # maximum time to retrieve image
- get_image_timeout: 300
- # maximum time to create instance (before waiting for cloud-init)
- create_instance_timeout: 60
- private_key: cloud_init_rsa
- public_key: cloud_init_rsa.pub
-platforms:
- ec2:
- enabled: true
- instance-type: t2.micro
- tag: cii
- lxd:
- enabled: true
- # overrides for image templates
- template_overrides:
- /var/lib/cloud/seed/nocloud-net/meta-data:
- when:
- - create
- - copy
- template: cloud-init-meta.tpl
- /var/lib/cloud/seed/nocloud-net/network-config:
- when:
- - create
- - copy
- template: cloud-init-network.tpl
- /var/lib/cloud/seed/nocloud-net/user-data:
- when:
- - create
- - copy
- template: cloud-init-user.tpl
- properties:
- default: |
- #cloud-config
- {}
- /var/lib/cloud/seed/nocloud-net/vendor-data:
- when:
- - create
- - copy
- template: cloud-init-vendor.tpl
- properties:
- default: |
- #cloud-config
- {}
- # overrides image template files
- template_files:
- cloud-init-meta.tpl: |
- #cloud-config
- instance-id: {{ container.name }}
- local-hostname: {{ container.name }}
- {{ config_get("user.meta-data", "") }}
- cloud-init-network.tpl: |
- {% if config_get("user.network-config", "") == "" %}version: 1
- config:
- - type: physical
- name: eth0
- subnets:
- - type: {% if config_get("user.network_mode", "") == "link-local" %}manual{% else %}dhcp{% endif %}
- control: auto{% else %}{{ config_get("user.network-config", "") }}{% endif %}
- cloud-init-user.tpl: |
- {{ config_get("user.user-data", properties.default) }}
- cloud-init-vendor.tpl: |
- {{ config_get("user.vendor-data", properties.default) }}
- nocloud-kvm:
- enabled: true
- cache_mode: cache=none,aio=native
- azurecloud:
- enabled: true
- region: West US 2
- vm_size: Standard_DS1_v2
- storage_sku: standard_lrs
- tag: ci
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/__init__.py b/tests/cloud_tests/platforms/__init__.py
deleted file mode 100644
index e506baa0..00000000
--- a/tests/cloud_tests/platforms/__init__.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Main init."""
-
-from .ec2 import platform as ec2
-from .lxd import platform as lxd
-from .nocloudkvm import platform as nocloudkvm
-from .azurecloud import platform as azurecloud
-from ..util import emit_dots_on_travis
-
-PLATFORMS = {
- 'ec2': ec2.EC2Platform,
- 'nocloud-kvm': nocloudkvm.NoCloudKVMPlatform,
- 'lxd': lxd.LXDPlatform,
- 'azurecloud': azurecloud.AzureCloudPlatform,
-}
-
-
-def get_image(platform, config):
- """Get image from platform object using os_name."""
- with emit_dots_on_travis():
- return platform.get_image(config)
-
-
-def get_instance(snapshot, *args, **kwargs):
- """Get instance from snapshot."""
- return snapshot.launch(*args, **kwargs)
-
-
-def get_platform(platform_name, config):
- """Get the platform object for 'platform_name' and init."""
- platform_cls = PLATFORMS.get(platform_name)
- if not platform_cls:
- raise ValueError('invalid platform name: {}'.format(platform_name))
- return platform_cls(config)
-
-
-def get_snapshot(image):
- """Get snapshot from image."""
- return image.snapshot()
-
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/azurecloud/image.py b/tests/cloud_tests/platforms/azurecloud/image.py
deleted file mode 100644
index aad2bca1..00000000
--- a/tests/cloud_tests/platforms/azurecloud/image.py
+++ /dev/null
@@ -1,116 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Azure Cloud image Base class."""
-
-from tests.cloud_tests import LOG
-
-from ..images import Image
-from .snapshot import AzureCloudSnapshot
-
-
-class AzureCloudImage(Image):
- """Azure Cloud backed image."""
-
- platform_name = 'azurecloud'
-
- def __init__(self, platform, config, image_id):
- """Set up image.
-
- @param platform: platform object
- @param config: image configuration
- @param image_id: image id used to boot instance
- """
- super(AzureCloudImage, self).__init__(platform, config)
- self._img_instance = None
- self.image_id = image_id
-
- @property
- def _instance(self):
- """Internal use only, returns a running instance"""
- if not self._img_instance:
- self._img_instance = self.platform.create_instance(
- self.properties, self.config, self.features,
- self.image_id, user_data=None)
- self._img_instance.start(wait=True, wait_for_cloud_init=True)
- return self._img_instance
-
- def destroy(self):
- """Delete the instance used to create a custom image."""
- if self._img_instance:
- LOG.debug('Deleting backing instance %s',
- self._img_instance.vm_name)
- delete_vm = self.platform.compute_client.virtual_machines.delete(
- self.platform.resource_group.name, self._img_instance.vm_name)
- delete_vm.wait()
-
- super(AzureCloudImage, self).destroy()
-
- def _execute(self, *args, **kwargs):
- """Execute command in image, modifying image."""
- LOG.debug('executing commands on image')
- self._instance.start(wait=True)
- return self._instance._execute(*args, **kwargs)
-
- def push_file(self, local_path, remote_path):
- """Copy file at 'local_path' to instance at 'remote_path'."""
- LOG.debug('pushing file to image')
- return self._instance.push_file(local_path, remote_path)
-
- def run_script(self, *args, **kwargs):
- """Run script in image, modifying image.
-
- @return_value: script output
- """
- LOG.debug('running script on image')
- self._instance.start()
- return self._instance.run_script(*args, **kwargs)
-
- def snapshot(self):
- """ Create snapshot (image) of instance, wait until done.
-
- If no instance has been booted, base image is returned.
- Otherwise runs the clean script, deallocates, generalizes
- and creates custom image from instance.
- """
- LOG.debug('creating snapshot of image')
- if not self._img_instance:
- LOG.debug('No existing image, snapshotting base image')
- return AzureCloudSnapshot(self.platform, self.properties,
- self.config, self.features,
- self._instance.vm_name,
- delete_on_destroy=False)
-
- LOG.debug('creating snapshot from instance: %s', self._img_instance)
- if self.config.get('boot_clean_script'):
- self._img_instance.run_script(self.config.get('boot_clean_script'))
-
- LOG.debug('deallocating instance %s', self._instance.vm_name)
- deallocate = self.platform.compute_client.virtual_machines.deallocate(
- self.platform.resource_group.name, self._instance.vm_name)
- deallocate.wait()
-
- LOG.debug('generalizing instance %s', self._instance.vm_name)
- self.platform.compute_client.virtual_machines.generalize(
- self.platform.resource_group.name, self._instance.vm_name)
-
- image_params = {
- "location": self.platform.location,
- "properties": {
- "sourceVirtualMachine": {
- "id": self._img_instance.instance.id
- }
- }
- }
- LOG.debug('updating resource group image %s', self._instance.vm_name)
- self.platform.compute_client.images.create_or_update(
- self.platform.resource_group.name, self._instance.vm_name,
- image_params)
-
- LOG.debug('destroying self')
- self.destroy()
-
- LOG.debug('snapshot complete')
- return AzureCloudSnapshot(self.platform, self.properties, self.config,
- self.features, self._instance.vm_name)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/azurecloud/instance.py b/tests/cloud_tests/platforms/azurecloud/instance.py
deleted file mode 100644
index eedbaae8..00000000
--- a/tests/cloud_tests/platforms/azurecloud/instance.py
+++ /dev/null
@@ -1,247 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base Azure Cloud instance."""
-
-from datetime import datetime, timedelta
-from urllib.parse import urlparse
-from time import sleep
-import traceback
-import os
-
-
-# pylint: disable=no-name-in-module
-from azure.storage.blob import BlockBlobService, BlobPermissions
-from msrestazure.azure_exceptions import CloudError
-
-from tests.cloud_tests import LOG
-
-from ..instances import Instance
-
-
-class AzureCloudInstance(Instance):
- """Azure Cloud backed instance."""
-
- platform_name = 'azurecloud'
-
- def __init__(self, platform, properties, config,
- features, image_id, user_data=None):
- """Set up instance.
-
- @param platform: platform object
- @param properties: dictionary of properties
- @param config: dictionary of configuration values
- @param features: dictionary of supported feature flags
- @param image_id: image to find and/or use
- @param user_data: test user-data to pass to instance
- """
- super(AzureCloudInstance, self).__init__(
- platform, image_id, properties, config, features)
-
- self.ssh_port = 22
- self.ssh_ip = None
- self.instance = None
- self.image_id = image_id
- self.vm_name = 'ci-azure-i-%s' % self.platform.tag
- self.user_data = user_data
- self.ssh_key_file = os.path.join(
- platform.config['data_dir'], platform.config['private_key'])
- self.ssh_pubkey_file = os.path.join(
- platform.config['data_dir'], platform.config['public_key'])
- self.blob_client, self.container, self.blob = None, None, None
-
- def start(self, wait=True, wait_for_cloud_init=False):
- """Start instance with the platforms NIC."""
- if self.instance:
- return
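-        # derive the Ubuntu release and marketplace SKU from the
-        # dash-separated image_id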
- data = self.image_id.split('-')
- release, support = data[2].replace('_', '.'), data[3]
- sku = '%s-%s' % (release, support) if support == 'LTS' else release
- image_resource_id = '/subscriptions/%s' \
- '/resourceGroups/%s' \
- '/providers/Microsoft.Compute/images/%s' % (
- self.platform.subscription_id,
- self.platform.resource_group.name,
- self.image_id)
- storage_uri = "http://%s.blob.core.windows.net" \
- % self.platform.storage.name
- with open(self.ssh_pubkey_file, 'r') as key:
- ssh_pub_keydata = key.read()
-
- image_exists = False
- try:
- LOG.debug('finding image in resource group using image_id')
- self.platform.compute_client.images.get(
- self.platform.resource_group.name,
- self.image_id
- )
- image_exists = True
- LOG.debug('image found, launching instance, image_id=%s',
- self.image_id)
- except CloudError:
- LOG.debug(('image not found, launching instance with base image, '
- 'image_id=%s'), self.image_id)
-
- vm_params = {
- 'name': self.vm_name,
- 'location': self.platform.location,
- 'os_profile': {
- 'computer_name': 'CI-%s' % self.platform.tag,
- 'admin_username': self.ssh_username,
- "customData": self.user_data,
- "linuxConfiguration": {
- "disable_password_authentication": True,
- "ssh": {
- "public_keys": [{
- "path": "/home/%s/.ssh/authorized_keys" %
- self.ssh_username,
- "keyData": ssh_pub_keydata
- }]
- }
- }
- },
- "diagnosticsProfile": {
- "bootDiagnostics": {
- "storageUri": storage_uri,
- "enabled": True
- }
- },
- 'hardware_profile': {
- 'vm_size': self.platform.vm_size
- },
- 'storage_profile': {
- 'image_reference': {
- 'id': image_resource_id
- } if image_exists else {
- 'publisher': 'Canonical',
- 'offer': 'UbuntuServer',
- 'sku': sku,
- 'version': 'latest'
- }
- },
- 'network_profile': {
- 'network_interfaces': [{
- 'id': self.platform.nic.id
- }]
- },
- 'tags': {
- 'Name': self.platform.tag,
- }
- }
-
- try:
- self.instance = self.platform.compute_client.virtual_machines.\
- create_or_update(self.platform.resource_group.name,
- self.vm_name, vm_params)
- LOG.debug('creating instance %s from image_id=%s', self.vm_name,
- self.image_id)
- except CloudError as e:
- raise RuntimeError(
- 'failed creating instance:\n{}'.format(traceback.format_exc())
- ) from e
-
- if wait:
- self.instance.wait()
- self.ssh_ip = self.platform.network_client.\
- public_ip_addresses.get(
- self.platform.resource_group.name,
- self.platform.public_ip.name
- ).ip_address
- self._wait_for_system(wait_for_cloud_init)
-
- self.instance = self.instance.result()
- self.blob_client, self.container, self.blob =\
- self._get_blob_client()
-
- def shutdown(self, wait=True):
- """Finds console log then stopping/deallocates VM"""
- LOG.debug('waiting on console log before stopping')
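-        # poll for the boot-diagnostics blob, giving up after 5 attempts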
- attempts, exists = 5, False
- while not exists and attempts:
- try:
- attempts -= 1
- exists = self.blob_client.get_blob_to_bytes(
- self.container, self.blob)
- LOG.debug('found console log')
- except Exception as e:
- if attempts:
- LOG.debug('Unable to find console log, '
- '%s attempts remaining', attempts)
- sleep(15)
- else:
- LOG.warning('Could not find console log: %s', e)
-
- LOG.debug('stopping instance %s', self.image_id)
- vm_deallocate = \
- self.platform.compute_client.virtual_machines.deallocate(
- self.platform.resource_group.name, self.image_id)
- if wait:
- vm_deallocate.wait()
-
- def destroy(self):
- """Delete VM and close all connections"""
- if self.instance:
- LOG.debug('destroying instance: %s', self.image_id)
- vm_delete = self.platform.compute_client.virtual_machines.delete(
- self.platform.resource_group.name, self.image_id)
- vm_delete.wait()
-
- self._ssh_close()
-
- super(AzureCloudInstance, self).destroy()
-
- def _execute(self, command, stdin=None, env=None):
- """Execute command on instance."""
- env_args = []
- if env:
-            env_args = ['env'] + ["%s=%s" % (k, v) for k, v in env.items()]
-
- return self._ssh(['sudo'] + env_args + list(command), stdin=stdin)
-
- def _get_blob_client(self):
- """
-        Use VM details to retrieve the container and blob name, then
-        create a blob service client with a SAS token in order to
-        retrieve the console log.
-
- :return: blob service, container name, blob name
- """
- LOG.debug('creating blob service for console log')
- storage = self.platform.storage_client.storage_accounts.get_properties(
- self.platform.resource_group.name, self.platform.storage.name)
-
- keys = self.platform.storage_client.storage_accounts.list_keys(
- self.platform.resource_group.name, self.platform.storage.name
- ).keys[0].value
-
- virtual_machine = self.platform.compute_client.virtual_machines.get(
- self.platform.resource_group.name, self.instance.name,
- expand='instanceView')
-
- blob_uri = virtual_machine.instance_view.boot_diagnostics.\
- serial_console_log_blob_uri
-
- container, blob = urlparse(blob_uri).path.split('/')[-2:]
-
- blob_client = BlockBlobService(
- account_name=storage.name,
- account_key=keys)
-
- sas = blob_client.generate_blob_shared_access_signature(
- container_name=container, blob_name=blob, protocol='https',
- expiry=datetime.utcnow() + timedelta(hours=1),
- permission=BlobPermissions.READ)
-
- blob_client = BlockBlobService(
- account_name=storage.name,
- sas_token=sas)
-
- return blob_client, container, blob
-
- def console_log(self):
- """Instance console.
-
- @return_value: bytes of this instance’s console
- """
- boot_diagnostics = self.blob_client.get_blob_to_bytes(
- self.container, self.blob)
- return boot_diagnostics.content
diff --git a/tests/cloud_tests/platforms/azurecloud/platform.py b/tests/cloud_tests/platforms/azurecloud/platform.py
deleted file mode 100644
index a664f612..00000000
--- a/tests/cloud_tests/platforms/azurecloud/platform.py
+++ /dev/null
@@ -1,240 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base Azure Cloud class."""
-
-import os
-import base64
-import traceback
-from datetime import datetime
-from tests.cloud_tests import LOG
-
-# pylint: disable=no-name-in-module
-from azure.common.credentials import ServicePrincipalCredentials
-# pylint: disable=no-name-in-module
-from azure.mgmt.resource import ResourceManagementClient
-# pylint: disable=no-name-in-module
-from azure.mgmt.network import NetworkManagementClient
-# pylint: disable=no-name-in-module
-from azure.mgmt.compute import ComputeManagementClient
-# pylint: disable=no-name-in-module
-from azure.mgmt.storage import StorageManagementClient
-from msrestazure.azure_exceptions import CloudError
-
-from .image import AzureCloudImage
-from .instance import AzureCloudInstance
-from ..platforms import Platform
-
-from cloudinit import util as c_util
-
-
-class AzureCloudPlatform(Platform):
- """Azure Cloud test platforms."""
-
- platform_name = 'azurecloud'
-
- def __init__(self, config):
- """Set up platform."""
- super(AzureCloudPlatform, self).__init__(config)
- self.tag = '%s-%s' % (
- config['tag'], datetime.now().strftime('%Y%m%d%H%M%S'))
- self.storage_sku = config['storage_sku']
- self.vm_size = config['vm_size']
- self.location = config['region']
-
- try:
- self.credentials, self.subscription_id = self._get_credentials()
-
- self.resource_client = ResourceManagementClient(
- self.credentials, self.subscription_id)
- self.compute_client = ComputeManagementClient(
- self.credentials, self.subscription_id)
- self.network_client = NetworkManagementClient(
- self.credentials, self.subscription_id)
- self.storage_client = StorageManagementClient(
- self.credentials, self.subscription_id)
-
- self.resource_group = self._create_resource_group()
- self.public_ip = self._create_public_ip_address()
- self.storage = self._create_storage_account(config)
- self.vnet = self._create_vnet()
- self.subnet = self._create_subnet()
- self.nic = self._create_nic()
- except CloudError as e:
- raise RuntimeError(
- 'failed creating a resource:\n{}'.format(
- traceback.format_exc()
- )
- ) from e
-
- def create_instance(self, properties, config, features,
- image_id, user_data=None):
- """Create an instance
-
- @param properties: image properties
- @param config: image configuration
- @param features: image features
- @param image_id: string of image id
- @param user_data: test user-data to pass to instance
- @return_value: cloud_tests.instances instance
- """
- if user_data is not None:
- user_data = str(base64.b64encode(
- user_data.encode('utf-8')), 'utf-8')
-
- return AzureCloudInstance(self, properties, config, features,
- image_id, user_data)
-
- def get_image(self, img_conf):
- """Get image using specified image configuration.
-
- @param img_conf: configuration for image
- @return_value: cloud_tests.images instance
- """
- ss_region = self.azure_location_to_simplestreams_region()
-
- filters = [
- 'arch=%s' % 'amd64',
- 'endpoint=https://management.core.windows.net/',
- 'region=%s' % ss_region,
- 'release=%s' % img_conf['release']
- ]
-
- LOG.debug('finding image using streams')
- image = self._query_streams(img_conf, filters)
-
- try:
- image_id = image['id']
- LOG.debug('found image: %s', image_id)
- if image_id.find('__') > 0:
- image_id = image_id.split('__')[1]
- LOG.debug('image_id shortened to %s', image_id)
- except KeyError as e:
- raise RuntimeError(
- 'no images found for %s' % img_conf['release']
- ) from e
-
- return AzureCloudImage(self, img_conf, image_id)
-
- def destroy(self):
- """Delete all resources in resource group."""
- LOG.debug("Deleting resource group: %s", self.resource_group.name)
- delete = self.resource_client.resource_groups.delete(
- self.resource_group.name)
- delete.wait()
-
- def azure_location_to_simplestreams_region(self):
- """Convert location to simplestreams region"""
- location = self.location.lower().replace(' ', '')
- LOG.debug('finding location %s using simple streams', location)
- regions_file = os.path.join(
- os.path.dirname(os.path.abspath(__file__)), 'regions.json')
- region_simplestreams_map = c_util.load_json(
- c_util.load_file(regions_file))
- return region_simplestreams_map.get(location, location)
-
- def _get_credentials(self):
- """Get credentials from environment"""
- LOG.debug('getting credentials from environment')
- cred_file = os.path.expanduser('~/.azure/credentials.json')
- try:
- azure_creds = c_util.load_json(
- c_util.load_file(cred_file))
- subscription_id = azure_creds['subscriptionId']
- credentials = ServicePrincipalCredentials(
- client_id=azure_creds['clientId'],
- secret=azure_creds['clientSecret'],
- tenant=azure_creds['tenantId'])
- return credentials, subscription_id
- except KeyError as e:
- raise RuntimeError(
- 'Please configure Azure service principal'
- ' credentials in %s' % cred_file
- ) from e
-
- def _create_resource_group(self):
- """Create resource group"""
- LOG.debug('creating resource group')
- resource_group_name = self.tag
- resource_group_params = {
- 'location': self.location
- }
- resource_group = self.resource_client.resource_groups.create_or_update(
- resource_group_name, resource_group_params)
- return resource_group
-
- def _create_storage_account(self, config):
- LOG.debug('creating storage account')
- storage_account_name = 'storage%s' % datetime.now().\
- strftime('%Y%m%d%H%M%S')
- storage_params = {
- 'sku': {
- 'name': config['storage_sku']
- },
- 'kind': "Storage",
- 'location': self.location
- }
- storage_account = self.storage_client.storage_accounts.create(
- self.resource_group.name, storage_account_name, storage_params)
- return storage_account.result()
-
- def _create_public_ip_address(self):
- """Create public ip address"""
- LOG.debug('creating public ip address')
- public_ip_name = '%s-ip' % self.resource_group.name
- public_ip_params = {
- 'location': self.location,
- 'public_ip_allocation_method': 'Dynamic'
- }
- ip = self.network_client.public_ip_addresses.create_or_update(
- self.resource_group.name, public_ip_name, public_ip_params)
- return ip.result()
-
- def _create_vnet(self):
- """create virtual network"""
- LOG.debug('creating vnet')
- vnet_name = '%s-vnet' % self.resource_group.name
- vnet_params = {
- 'location': self.location,
- 'address_space': {
- 'address_prefixes': ['10.0.0.0/16']
- }
- }
- vnet = self.network_client.virtual_networks.create_or_update(
- self.resource_group.name, vnet_name, vnet_params)
- return vnet.result()
-
- def _create_subnet(self):
- """create sub-network"""
- LOG.debug('creating subnet')
- subnet_name = '%s-subnet' % self.resource_group.name
- subnet_params = {
- 'address_prefix': '10.0.0.0/24'
- }
- subnet = self.network_client.subnets.create_or_update(
- self.resource_group.name, self.vnet.name,
- subnet_name, subnet_params)
- return subnet.result()
-
- def _create_nic(self):
- """Create network interface controller"""
- LOG.debug('creating nic')
- nic_name = '%s-nic' % self.resource_group.name
- nic_params = {
- 'location': self.location,
- 'ip_configurations': [{
- 'name': 'ipconfig',
- 'subnet': {
- 'id': self.subnet.id
- },
- 'publicIpAddress': {
- 'id': "/subscriptions/%s"
- "/resourceGroups/%s/providers/Microsoft.Network"
- "/publicIPAddresses/%s" % (
- self.subscription_id, self.resource_group.name,
- self.public_ip.name),
- }
- }]
- }
- nic = self.network_client.network_interfaces.create_or_update(
- self.resource_group.name, nic_name, nic_params)
- return nic.result()
diff --git a/tests/cloud_tests/platforms/azurecloud/regions.json b/tests/cloud_tests/platforms/azurecloud/regions.json
deleted file mode 100644
index c1b4da20..00000000
--- a/tests/cloud_tests/platforms/azurecloud/regions.json
+++ /dev/null
@@ -1,42 +0,0 @@
-{
- "eastasia": "East Asia",
- "southeastasia": "Southeast Asia",
- "centralus": "Central US",
- "eastus": "East US",
- "eastus2": "East US 2",
- "westus": "West US",
- "northcentralus": "North Central US",
- "southcentralus": "South Central US",
- "northeurope": "North Europe",
- "westeurope": "West Europe",
- "japanwest": "Japan West",
- "japaneast": "Japan East",
- "brazilsouth": "Brazil South",
- "australiaeast": "Australia East",
- "australiasoutheast": "Australia Southeast",
- "southindia": "South India",
- "centralindia": "Central India",
- "westindia": "West India",
- "canadacentral": "Canada Central",
- "canadaeast": "Canada East",
- "uksouth": "UK South",
- "ukwest": "UK West",
- "westcentralus": "West Central US",
- "westus2": "West US 2",
- "koreacentral": "Korea Central",
- "koreasouth": "Korea South",
- "francecentral": "France Central",
- "francesouth": "France South",
- "australiacentral": "Australia Central",
- "australiacentral2": "Australia Central 2",
- "uaecentral": "UAE Central",
- "uaenorth": "UAE North",
- "southafricanorth": "South Africa North",
- "southafricawest": "South Africa West",
- "switzerlandnorth": "Switzerland North",
- "switzerlandwest": "Switzerland West",
- "germanynorth": "Germany North",
- "germanywestcentral": "Germany West Central",
- "norwaywest": "Norway West",
- "norwayeast": "Norway East"
-}
diff --git a/tests/cloud_tests/platforms/azurecloud/snapshot.py b/tests/cloud_tests/platforms/azurecloud/snapshot.py
deleted file mode 100644
index 580cc596..00000000
--- a/tests/cloud_tests/platforms/azurecloud/snapshot.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base Azure Cloud snapshot."""
-
-from ..snapshots import Snapshot
-
-from tests.cloud_tests import LOG
-
-
-class AzureCloudSnapshot(Snapshot):
- """Azure Cloud image copy backed snapshot."""
-
- platform_name = 'azurecloud'
-
- def __init__(self, platform, properties, config, features, image_id,
- delete_on_destroy=True):
- """Set up snapshot.
-
- @param platform: platform object
- @param properties: image properties
- @param config: image config
- @param features: supported feature flags
- """
- super(AzureCloudSnapshot, self).__init__(
- platform, properties, config, features)
-
- self.image_id = image_id
- self.delete_on_destroy = delete_on_destroy
-
- def launch(self, user_data, meta_data=None, block=True, start=True,
- use_desc=None):
- """Launch instance.
-
- @param user_data: user-data for the instance
- @param meta_data: meta_data for the instance
- @param block: wait until instance is created
- @param start: start instance and wait until fully started
- @param use_desc: description of snapshot instance use
- @return_value: an Instance
- """
- if meta_data is not None:
- raise ValueError("metadata not supported on Azure Cloud tests")
-
- instance = self.platform.create_instance(
- self.properties, self.config, self.features,
- self.image_id, user_data)
-
- return instance
-
- def destroy(self):
- """Clean up snapshot data."""
- LOG.debug('destroying image %s', self.image_id)
- if self.delete_on_destroy:
- self.platform.compute_client.images.delete(
- self.platform.resource_group.name,
- self.image_id)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/ec2/image.py b/tests/cloud_tests/platforms/ec2/image.py
deleted file mode 100644
index d7b2c908..00000000
--- a/tests/cloud_tests/platforms/ec2/image.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""EC2 Image Base Class."""
-
-from ..images import Image
-from .snapshot import EC2Snapshot
-
-from tests.cloud_tests import LOG
-
-
-class EC2Image(Image):
- """EC2 backed image."""
-
- platform_name = 'ec2'
-
- def __init__(self, platform, config, image_ami):
- """Set up image.
-
- @param platform: platform object
- @param config: image configuration
- @param image_ami: string of image ami ID
- """
- super(EC2Image, self).__init__(platform, config)
- self._img_instance = None
- self.image_ami = image_ami
-
- @property
- def _instance(self):
- """Internal use only, returns a running instance"""
- if not self._img_instance:
- self._img_instance = self.platform.create_instance(
- self.properties, self.config, self.features,
- self.image_ami, user_data=None)
- self._img_instance.start(wait=True, wait_for_cloud_init=True)
- return self._img_instance
-
- def destroy(self):
- """Delete the instance used to create a custom image."""
- if self._img_instance:
- LOG.debug('terminating backing instance %s',
- self._img_instance.instance.instance_id)
- self._img_instance.instance.terminate()
- self._img_instance.instance.wait_until_terminated()
-
- super(EC2Image, self).destroy()
-
- def _execute(self, *args, **kwargs):
- """Execute command in image, modifying image."""
- self._instance.start(wait=True)
- return self._instance._execute(*args, **kwargs)
-
- def push_file(self, local_path, remote_path):
- """Copy file at 'local_path' to instance at 'remote_path'."""
- self._instance.start(wait=True)
- return self._instance.push_file(local_path, remote_path)
-
- def run_script(self, *args, **kwargs):
- """Run script in image, modifying image.
-
- @return_value: script output
- """
- self._instance.start(wait=True)
- return self._instance.run_script(*args, **kwargs)
-
- def snapshot(self):
- """Create snapshot of image, block until done.
-
- Will return base image_ami if no instance has been booted, otherwise
-        will run the clean script, shut down the instance, create a custom
- AMI, and use that AMI once available.
- """
- if not self._img_instance:
- return EC2Snapshot(self.platform, self.properties, self.config,
- self.features, self.image_ami,
- delete_on_destroy=False)
-
- if self.config.get('boot_clean_script'):
- self._img_instance.run_script(self.config.get('boot_clean_script'))
-
- self._img_instance.shutdown(wait=True)
-
- LOG.debug('creating custom ami from instance %s',
- self._img_instance.instance.instance_id)
- response = self.platform.ec2_client.create_image(
- Name='%s-%s' % (self.platform.tag, self.image_ami),
- InstanceId=self._img_instance.instance.instance_id
- )
- image_ami_edited = response['ImageId']
-
- # Create image and wait until it is in the 'available' state
- image = self.platform.ec2_resource.Image(image_ami_edited)
- image.wait_until_exists()
- waiter = self.platform.ec2_client.get_waiter('image_available')
- waiter.wait(ImageIds=[image.id])
- image.reload()
-
- return EC2Snapshot(self.platform, self.properties, self.config,
- self.features, image_ami_edited)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/ec2/instance.py b/tests/cloud_tests/platforms/ec2/instance.py
deleted file mode 100644
index d2e84047..00000000
--- a/tests/cloud_tests/platforms/ec2/instance.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base EC2 instance."""
-import os
-
-import botocore
-
-from ..instances import Instance
-from tests.cloud_tests import LOG, util
-
-
-class EC2Instance(Instance):
- """EC2 backed instance."""
-
- platform_name = "ec2"
- _ssh_client = None
-
- def __init__(self, platform, properties, config, features,
- image_ami, user_data=None):
- """Set up instance.
-
- @param platform: platform object
- @param properties: dictionary of properties
- @param config: dictionary of configuration values
- @param features: dictionary of supported feature flags
- @param image_ami: AWS AMI ID for image to use
- @param user_data: test user-data to pass to instance
- """
- super(EC2Instance, self).__init__(
- platform, image_ami, properties, config, features)
-
- self.image_ami = image_ami
- self.instance = None
- self.user_data = user_data
- self.ssh_ip = None
- self.ssh_port = 22
- self.ssh_key_file = os.path.join(
- platform.config['data_dir'], platform.config['private_key'])
- self.ssh_pubkey_file = os.path.join(
- platform.config['data_dir'], platform.config['public_key'])
-
- def console_log(self):
- """Collect console log from instance.
-
-        The console log is buffered and not always present, so this may
-        return an empty string.
- """
- try:
- # OutputBytes comes from platform._decode_console_output_as_bytes
- response = self.instance.console_output()
- return response['OutputBytes']
- except KeyError as e:
- if 'Output' in response:
- msg = ("'OutputBytes' did not exist in console_output() but "
- "'Output' did: %s..." % response['Output'][0:128])
- raise util.PlatformError('console_log', msg) from e
- return ('No Console Output [%s]' % self.instance).encode()
-
- def destroy(self):
- """Clean up instance."""
- if self.instance:
- LOG.debug('destroying instance %s', self.instance.id)
- self.instance.terminate()
- self.instance.wait_until_terminated()
-
- self._ssh_close()
-
- super(EC2Instance, self).destroy()
-
- def _execute(self, command, stdin=None, env=None):
- """Execute command on instance."""
- env_args = []
- if env:
-            env_args = ['env'] + ["%s=%s" % (k, v) for k, v in env.items()]
-
- return self._ssh(['sudo'] + env_args + list(command), stdin=stdin)
-
- def start(self, wait=True, wait_for_cloud_init=False):
- """Start instance on EC2 with the platfrom's VPC."""
- if self.instance:
- if self.instance.state['Name'] == 'running':
- return
-
- LOG.debug('starting instance %s', self.instance.id)
- self.instance.start()
- else:
- LOG.debug('launching instance')
-
- args = {
- 'ImageId': self.image_ami,
- 'InstanceType': self.platform.instance_type,
- 'KeyName': self.platform.key_name,
- 'MaxCount': 1,
- 'MinCount': 1,
- 'SecurityGroupIds': [self.platform.security_group.id],
- 'SubnetId': self.platform.subnet.id,
- 'TagSpecifications': [{
- 'ResourceType': 'instance',
- 'Tags': [{
- 'Key': 'Name', 'Value': self.platform.tag
- }]
- }],
- }
-
- if self.user_data:
- args['UserData'] = self.user_data
-
- try:
- instances = self.platform.ec2_resource.create_instances(**args)
- except botocore.exceptions.ClientError as error:
- error_msg = error.response['Error']['Message']
- raise util.PlatformError('start', error_msg)
-
- self.instance = instances[0]
-
- LOG.debug('instance id: %s', self.instance.id)
- if wait:
- self.instance.wait_until_running()
- self.instance.reload()
- self.ssh_ip = self.instance.public_ip_address
- self._wait_for_system(wait_for_cloud_init)
-
- def shutdown(self, wait=True):
- """Shutdown instance."""
- LOG.debug('stopping instance %s', self.instance.id)
- self.instance.stop()
-
- if wait:
- self.instance.wait_until_stopped()
- self.instance.reload()
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/ec2/platform.py b/tests/cloud_tests/platforms/ec2/platform.py
deleted file mode 100644
index b61a2ffb..00000000
--- a/tests/cloud_tests/platforms/ec2/platform.py
+++ /dev/null
@@ -1,263 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base EC2 platform."""
-from datetime import datetime
-import os
-
-import boto3
-import botocore
-from botocore import session, handlers
-import base64
-
-from ..platforms import Platform
-from .image import EC2Image
-from .instance import EC2Instance
-from tests.cloud_tests import LOG
-
-
-class EC2Platform(Platform):
- """EC2 test platform."""
-
- platform_name = 'ec2'
- ipv4_cidr = '192.168.1.0/20'
-
- def __init__(self, config):
- """Set up platform."""
- super(EC2Platform, self).__init__(config)
- # Used for unique VPC, SSH key, and custom AMI generation naming
- self.tag = '%s-%s' % (
- config['tag'], datetime.now().strftime('%Y%m%d%H%M%S'))
- self.instance_type = config['instance-type']
-
- try:
- b3session = get_session()
- self.ec2_client = b3session.client('ec2')
- self.ec2_resource = b3session.resource('ec2')
- self.ec2_region = b3session.region_name
- self.key_name = self._upload_public_key(config)
- except botocore.exceptions.NoRegionError as e:
- raise RuntimeError(
- 'Please configure default region in $HOME/.aws/config'
- ) from e
- except botocore.exceptions.NoCredentialsError as e:
- raise RuntimeError(
- 'Please configure ec2 credentials in $HOME/.aws/credentials'
- ) from e
-
- self.vpc = self._create_vpc()
- self.internet_gateway = self._create_internet_gateway()
- self.subnet = self._create_subnet()
- self.routing_table = self._create_routing_table()
- self.security_group = self._create_security_group()
-
- def create_instance(self, properties, config, features,
- image_ami, user_data=None):
- """Create an instance
-
- @param src_img_path: image path to launch from
- @param properties: image properties
- @param config: image configuration
- @param features: image features
- @param image_ami: string of image ami ID
- @param user_data: test user-data to pass to instance
- @return_value: cloud_tests.instances instance
- """
- return EC2Instance(self, properties, config, features,
- image_ami, user_data)
-
- def destroy(self):
- """Delete SSH keys, terminate all instances, and delete VPC."""
- for instance in self.vpc.instances.all():
- LOG.debug('waiting for instance %s termination', instance.id)
- instance.terminate()
- instance.wait_until_terminated()
-
- if self.key_name:
- LOG.debug('deleting SSH key %s', self.key_name)
- self.ec2_client.delete_key_pair(KeyName=self.key_name)
-
- if self.security_group:
- LOG.debug('deleting security group %s', self.security_group.id)
- self.security_group.delete()
-
- if self.subnet:
- LOG.debug('deleting subnet %s', self.subnet.id)
- self.subnet.delete()
-
- if self.routing_table:
- LOG.debug('deleting routing table %s', self.routing_table.id)
- self.routing_table.delete()
-
- if self.internet_gateway:
- LOG.debug('deleting internet gateway %s', self.internet_gateway.id)
- self.internet_gateway.detach_from_vpc(VpcId=self.vpc.id)
- self.internet_gateway.delete()
-
- if self.vpc:
- LOG.debug('deleting vpc %s', self.vpc.id)
- self.vpc.delete()
-
- def get_image(self, img_conf):
- """Get image using specified image configuration.
-
- Hard coded for 'amd64' based images.
-
- @param img_conf: configuration for image
- @return_value: cloud_tests.images instance
- """
- if img_conf['root-store'] == 'ebs':
- root_store = 'ssd'
- elif img_conf['root-store'] == 'instance-store':
- root_store = 'instance'
- else:
- raise RuntimeError('Unknown root-store type: %s' %
- (img_conf['root-store']))
-
- filters = [
- 'arch=%s' % 'amd64',
- 'endpoint=https://ec2.%s.amazonaws.com' % self.ec2_region,
- 'region=%s' % self.ec2_region,
- 'release=%s' % img_conf['release'],
- 'root_store=%s' % root_store,
- 'virt=hvm',
- ]
-
- LOG.debug('finding image using streams')
- image = self._query_streams(img_conf, filters)
-
- try:
- image_ami = image['id']
- except KeyError as e:
- raise RuntimeError(
- 'No images found for %s!' % img_conf['release']
- ) from e
-
- LOG.debug('found image: %s', image_ami)
- image = EC2Image(self, img_conf, image_ami)
- return image
-
- def _create_internet_gateway(self):
- """Create Internet Gateway and assign to VPC."""
- LOG.debug('creating internet gateway')
- # pylint: disable=no-member
- internet_gateway = self.ec2_resource.create_internet_gateway()
- internet_gateway.attach_to_vpc(VpcId=self.vpc.id)
- self._tag_resource(internet_gateway)
-
- return internet_gateway
-
- def _create_routing_table(self):
- """Update default routing table with internet gateway.
-
- This sets up internet access between the VPC via the internet gateway
- by configuring routing tables for IPv4 and IPv6.
- """
- LOG.debug('creating routing table')
- route_table = self.vpc.create_route_table()
- route_table.create_route(DestinationCidrBlock='0.0.0.0/0',
- GatewayId=self.internet_gateway.id)
- route_table.create_route(DestinationIpv6CidrBlock='::/0',
- GatewayId=self.internet_gateway.id)
- route_table.associate_with_subnet(SubnetId=self.subnet.id)
- self._tag_resource(route_table)
-
- return route_table
-
- def _create_security_group(self):
- """Enables ingress to default VPC security group."""
- LOG.debug('creating security group')
- security_group = self.vpc.create_security_group(
- GroupName=self.tag, Description='integration test security group')
- security_group.authorize_ingress(
- IpProtocol='-1', FromPort=-1, ToPort=-1, CidrIp='0.0.0.0/0')
- self._tag_resource(security_group)
-
- return security_group
-
- def _create_subnet(self):
- """Generate IPv4 and IPv6 subnets for use."""
- ipv6_cidr = self.vpc.ipv6_cidr_block_association_set[0][
- 'Ipv6CidrBlock'][:-2] + '64'
-
- LOG.debug('creating subnet with following ranges:')
- LOG.debug('ipv4: %s', self.ipv4_cidr)
- LOG.debug('ipv6: %s', ipv6_cidr)
- subnet = self.vpc.create_subnet(CidrBlock=self.ipv4_cidr,
- Ipv6CidrBlock=ipv6_cidr)
- modify_subnet = subnet.meta.client.modify_subnet_attribute
- modify_subnet(SubnetId=subnet.id,
- MapPublicIpOnLaunch={'Value': True})
- self._tag_resource(subnet)
-
- return subnet
-
- def _create_vpc(self):
- """Setup AWS EC2 VPC or return existing VPC."""
- LOG.debug('creating new vpc')
- try:
- vpc = self.ec2_resource.create_vpc( # pylint: disable=no-member
- CidrBlock=self.ipv4_cidr,
- AmazonProvidedIpv6CidrBlock=True)
- except botocore.exceptions.ClientError as e:
- raise RuntimeError(e) from e
-
- vpc.wait_until_available()
- self._tag_resource(vpc)
-
- return vpc
-
- def _tag_resource(self, resource):
- """Tag a resource with the specified tag.
-
-        This makes resources specific to this test run much easier to
-        find and delete.
-
- @param resource: resource to tag
- """
- tag = {
- 'Key': 'Name',
- 'Value': self.tag
- }
- resource.create_tags(Tags=[tag])
-
- def _upload_public_key(self, config):
- """Generate random name and upload SSH key with that name.
-
- @param config: platform config
- @return: string of ssh key name
- """
- key_file = os.path.join(config['data_dir'], config['public_key'])
- with open(key_file, 'r') as file:
- public_key = file.read().strip('\n')
-
- LOG.debug('uploading SSH key %s', self.tag)
- self.ec2_client.import_key_pair(KeyName=self.tag,
- PublicKeyMaterial=public_key)
-
- return self.tag
-
-
-def _decode_console_output_as_bytes(parsed, **kwargs):
- """Provide console output as bytes in OutputBytes.
-
- For this to be useful, the session has to have had the
- decode_console_output handler unregistered already.
-
- https://github.com/boto/botocore/issues/1351 ."""
- if 'Output' not in parsed:
- return
- orig = parsed['Output']
- handlers.decode_console_output(parsed, **kwargs)
- parsed['OutputBytes'] = base64.b64decode(orig)
-
-
-def get_session():
- mysess = session.get_session()
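-    # swap botocore's default console-output decoder for one that also
-    # provides the raw bytes as 'OutputBytes'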
- mysess.unregister('after-call.ec2.GetConsoleOutput',
- handlers.decode_console_output)
- mysess.register('after-call.ec2.GetConsoleOutput',
- _decode_console_output_as_bytes)
- return boto3.Session(botocore_session=mysess)
-
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/ec2/snapshot.py b/tests/cloud_tests/platforms/ec2/snapshot.py
deleted file mode 100644
index 2c48cb54..00000000
--- a/tests/cloud_tests/platforms/ec2/snapshot.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base EC2 snapshot."""
-
-from ..snapshots import Snapshot
-from tests.cloud_tests import LOG
-
-
-class EC2Snapshot(Snapshot):
- """EC2 image copy backed snapshot."""
-
- platform_name = 'ec2'
-
- def __init__(self, platform, properties, config, features, image_ami,
- delete_on_destroy=True):
- """Set up snapshot.
-
- @param platform: platform object
- @param properties: image properties
- @param config: image config
- @param features: supported feature flags
- @param image_ami: string of image ami ID
- @param delete_on_destroy: boolean to delete on destroy
- """
- super(EC2Snapshot, self).__init__(
- platform, properties, config, features)
-
- self.image_ami = image_ami
- self.delete_on_destroy = delete_on_destroy
-
- def destroy(self):
- """Deregister the backing AMI."""
- if self.delete_on_destroy:
- image = self.platform.ec2_resource.Image(self.image_ami)
- snapshot_id = image.block_device_mappings[0]['Ebs']['SnapshotId']
-
- LOG.debug('removing custom ami %s', self.image_ami)
- self.platform.ec2_client.deregister_image(ImageId=self.image_ami)
-
- LOG.debug('removing custom snapshot %s', snapshot_id)
- self.platform.ec2_client.delete_snapshot(SnapshotId=snapshot_id)
-
- def launch(self, user_data, meta_data=None, block=True, start=True,
- use_desc=None):
- """Launch instance.
-
- @param user_data: user-data for the instance
- @param meta_data: meta_data for the instance
- @param block: wait until instance is created
- @param start: start instance and wait until fully started
- @param use_desc: string of test name
- @return_value: an Instance
- """
- if meta_data is not None:
- raise ValueError("metadata not supported on Ec2")
-
- instance = self.platform.create_instance(
- self.properties, self.config, self.features,
- self.image_ami, user_data)
-
- if start:
- instance.start()
-
- return instance
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/images.py b/tests/cloud_tests/platforms/images.py
deleted file mode 100644
index f047de2e..00000000
--- a/tests/cloud_tests/platforms/images.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base class for images."""
-
-from ..util import TargetBase
-
-
-class Image(TargetBase):
- """Base class for images."""
-
- platform_name = None
-
- def __init__(self, platform, config):
- """Set up image.
-
- @param platform: platform object
- @param config: image configuration
- """
- self.platform = platform
- self.config = config
-
- def __str__(self):
- """A brief description of the image."""
- return '-'.join((self.properties['os'], self.properties['release']))
-
- @property
- def properties(self):
- """{} containing: 'arch', 'os', 'version', 'release'."""
- return {k: self.config[k]
- for k in ('arch', 'os', 'release', 'version')}
-
- @property
- def features(self):
- """Feature flags supported by this image.
-
- @return_value: list of feature names
- """
- return [k for k, v in self.config.get('features', {}).items() if v]
-
- @property
- def setup_overrides(self):
- """Setup options that need to be overridden for the image.
-
- @return_value: dictionary to update args with
- """
-        # NOTE: more sophisticated options may be required at some point
- return self.config.get('setup_overrides', {})
-
- def snapshot(self):
- """Create snapshot of image, block until done."""
- raise NotImplementedError
-
- def destroy(self):
- """Clean up data associated with image."""
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/instances.py b/tests/cloud_tests/platforms/instances.py
deleted file mode 100644
index efc35c7f..00000000
--- a/tests/cloud_tests/platforms/instances.py
+++ /dev/null
@@ -1,165 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base instance."""
-import time
-
-import paramiko
-from paramiko.ssh_exception import (
- BadHostKeyException, AuthenticationException, SSHException)
-
-from ..util import TargetBase
-from tests.cloud_tests import LOG, util
-
-
-class Instance(TargetBase):
- """Base instance object."""
-
- platform_name = None
- _ssh_client = None
-
- def __init__(self, platform, name, properties, config, features):
- """Set up instance.
-
- @param platform: platform object
- @param name: hostname of instance
- @param properties: image properties
- @param config: image config
- @param features: supported feature flags
- """
- self.platform = platform
- self.name = name
- self.properties = properties
- self.config = config
- self.features = features
- self._tmp_count = 0
-
- self.ssh_ip = None
- self.ssh_port = None
- self.ssh_key_file = None
- self.ssh_username = 'ubuntu'
-
- def console_log(self):
- """Instance console.
-
- @return_value: bytes of this instance’s console
- """
- raise NotImplementedError
-
- def reboot(self, wait=True):
- """Reboot instance."""
- raise NotImplementedError
-
- def shutdown(self, wait=True):
- """Shutdown instance."""
- raise NotImplementedError
-
- def start(self, wait=True, wait_for_cloud_init=False):
- """Start instance."""
- raise NotImplementedError
-
- def destroy(self):
- """Clean up instance."""
- self._ssh_close()
-
- def _ssh(self, command, stdin=None):
- """Run a command via SSH."""
- client = self._ssh_connect()
-
- cmd = util.shell_pack(command)
- fp_in, fp_out, fp_err = client.exec_command(cmd)
- channel = fp_in.channel
-
- if stdin is not None:
- fp_in.write(stdin)
- fp_in.close()
-
- channel.shutdown_write()
- rc = channel.recv_exit_status()
-
- return (fp_out.read(), fp_err.read(), rc)
-
- def _ssh_close(self):
- if self._ssh_client:
- try:
- self._ssh_client.close()
- except SSHException:
- LOG.warning('Failed to close SSH connection.')
- self._ssh_client = None
-
- def _ssh_connect(self):
- """Connect via SSH.
-
-        Attempt to SSH to the instance at the configured IP and port. On
-        failure, retry up to two more times for a total of three
-        attempts, sleeping a few seconds between attempts.
- """
- if self._ssh_client:
- return self._ssh_client
-
- if not self.ssh_ip or not self.ssh_port:
- raise ValueError("Cannot ssh_connect, ssh_ip=%s ssh_port=%s" %
- (self.ssh_ip, self.ssh_port))
-
- client = paramiko.SSHClient()
- client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
- private_key = paramiko.RSAKey.from_private_key_file(self.ssh_key_file)
-
- retries = 3
- while retries:
- try:
- client.connect(username=self.ssh_username,
- hostname=self.ssh_ip, port=self.ssh_port,
- pkey=private_key)
- self._ssh_client = client
- return client
- except (ConnectionRefusedError, AuthenticationException,
- BadHostKeyException, ConnectionResetError, SSHException,
- OSError):
- retries -= 1
- LOG.debug('Retrying ssh connection on connect failure')
- time.sleep(3)
-
- ssh_cmd = 'Failed ssh connection to %s@%s:%s after 3 retries' % (
- self.ssh_username, self.ssh_ip, self.ssh_port
- )
- raise util.InTargetExecuteError(b'', b'', 1, ssh_cmd, 'ssh')
-
- def _wait_for_system(self, wait_for_cloud_init):
- """Wait until system has fully booted and cloud-init has finished.
-
-        @param wait_for_cloud_init: also wait for cloud-init to finish
-        @return_value: None, may raise OSError if boot_timeout is exceeded
- """
- def clean_test(test):
- """Clean formatting for system ready test testcase."""
- return ' '.join(line for line in test.strip().splitlines()
- if not line.lstrip().startswith('#'))
-
- boot_timeout = self.config['boot_timeout']
- tests = [self.config['system_ready_script']]
- if wait_for_cloud_init:
- tests.append(self.config['cloud_init_ready_script'])
-
- formatted_tests = ' && '.join(clean_test(t) for t in tests)
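-        # shell loop: run the readiness tests once per second until they
-        # pass or boot_timeout seconds elapse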
- cmd = ('i=0; while [ $i -lt {time} ] && i=$(($i+1)); do {test} && '
- 'exit 0; sleep 1; done; exit 1').format(time=boot_timeout,
- test=formatted_tests)
-
- end_time = time.time() + boot_timeout
- while True:
- try:
- return_code = self.execute(
- cmd, rcs=(0, 1), description='wait for instance start'
- )[-1]
- if return_code == 0:
- break
- except util.InTargetExecuteError:
- LOG.warning("failed to connect via SSH")
-
- if time.time() < end_time:
- time.sleep(3)
- else:
- raise util.PlatformError('ssh', 'after %ss instance is not '
- 'reachable' % boot_timeout)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/lxd/image.py b/tests/cloud_tests/platforms/lxd/image.py
deleted file mode 100644
index a88b47f3..00000000
--- a/tests/cloud_tests/platforms/lxd/image.py
+++ /dev/null
@@ -1,211 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""LXD Image Base Class."""
-
-import os
-import shutil
-import tempfile
-
-from ..images import Image
-from .snapshot import LXDSnapshot
-from cloudinit import subp
-from cloudinit import util as c_util
-from tests.cloud_tests import util
-
-
-class LXDImage(Image):
- """LXD backed image."""
-
- platform_name = "lxd"
-
- def __init__(self, platform, config, pylxd_image):
- """Set up image.
-
- @param platform: platform object
- @param config: image configuration
- """
- self.modified = False
- self._img_instance = None
- self._pylxd_image = None
- self.pylxd_image = pylxd_image
- super(LXDImage, self).__init__(platform, config)
-
- @property
- def pylxd_image(self):
- """Property function."""
- if self._pylxd_image:
- self._pylxd_image.sync()
- return self._pylxd_image
-
- @pylxd_image.setter
- def pylxd_image(self, pylxd_image):
- if self._img_instance:
- self._instance.destroy()
- self._img_instance = None
- if (self._pylxd_image and
- (self._pylxd_image is not pylxd_image) and
- (not self.config.get('cache_base_image') or self.modified)):
- self._pylxd_image.delete(wait=True)
- self.modified = False
- self._pylxd_image = pylxd_image
-
- @property
- def _instance(self):
-        """Internal use only, returns an instance
-
- This starts an lxc instance from the image, so it is "dirty".
- Better would be some way to modify this "at rest".
- lxc-pstart would be an option."""
- if not self._img_instance:
- self._img_instance = self.platform.launch_container(
- self.properties, self.config, self.features,
- use_desc='image-modification', image_desc=str(self),
- image=self.pylxd_image.fingerprint)
- self._img_instance.start()
- return self._img_instance
-
- @property
- def properties(self):
- """{} containing: 'arch', 'os', 'version', 'release'."""
- properties = self.pylxd_image.properties
- return {
- 'arch': properties.get('architecture'),
- 'os': properties.get('os'),
- 'version': properties.get('version'),
- 'release': properties.get('release'),
- }
-
- def export_image(self, output_dir):
- """Export image from lxd image store to disk.
-
- @param output_dir: dir to store the exported image in
- @return_value: tuple of path to metadata tarball and rootfs
-
- Only the "split" image format with separate rootfs and metadata
- files is supported, e.g:
-
- 71f171df[...]cd31.squashfs (could also be: .tar.xz or .tar.gz)
- meta-71f171df[...]cd31.tar.xz
-
- Combined images made by a single tarball are not supported.
- """
- # pylxd's image export feature doesn't do split exports, so use cmdline
- fp = self.pylxd_image.fingerprint
- subp.subp(['lxc', 'image', 'export', fp, output_dir], capture=True)
- image_files = [p for p in os.listdir(output_dir) if fp in p]
-
- if len(image_files) != 2:
- raise NotImplementedError(
- "Image %s has unsupported format. "
- "Expected 2 files, found %d: %s."
- % (fp, len(image_files), ', '.join(image_files)))
-
- metadata = os.path.join(
- output_dir,
- next(p for p in image_files if p.startswith('meta-')))
- rootfs = os.path.join(
- output_dir,
- next(p for p in image_files if not p.startswith('meta-')))
- return (metadata, rootfs)
-
- def import_image(self, metadata, rootfs):
- """Import image to lxd image store from (split) tarball on disk.
-
- Note, this will replace and delete the current pylxd_image
-
- @param metadata: metadata tarball
- @param rootfs: rootfs tarball
- @return_value: imported image fingerprint
- """
- alias = util.gen_instance_name(
- image_desc=str(self), use_desc='update-metadata')
- subp.subp(['lxc', 'image', 'import', metadata, rootfs,
- '--alias', alias], capture=True)
- self.pylxd_image = self.platform.query_image_by_alias(alias)
- return self.pylxd_image.fingerprint
-
- def update_templates(self, template_config, template_data):
- """Update the image's template configuration.
-
- Note, this will replace and delete the current pylxd_image
-
- @param template_config: config overrides for template metadata
- @param template_data: template data to place into templates/
- """
- # set up tmp files
- export_dir = tempfile.mkdtemp(prefix='cloud_test_util_')
- extract_dir = tempfile.mkdtemp(prefix='cloud_test_util_')
- new_metadata = os.path.join(export_dir, 'new-meta.tar.xz')
- metadata_yaml = os.path.join(extract_dir, 'metadata.yaml')
- template_dir = os.path.join(extract_dir, 'templates')
-
- try:
- # extract old data
- (metadata, rootfs) = self.export_image(export_dir)
- shutil.unpack_archive(metadata, extract_dir)
-
- # update metadata
- metadata = c_util.read_conf(metadata_yaml)
- templates = metadata.get('templates', {})
- templates.update(template_config)
- metadata['templates'] = templates
- util.yaml_dump(metadata, metadata_yaml)
-
- # write out template files
- for name, content in template_data.items():
- path = os.path.join(template_dir, name)
- c_util.write_file(path, content)
-
- # store new data, mark new image as modified
- util.flat_tar(new_metadata, extract_dir)
- self.import_image(new_metadata, rootfs)
- self.modified = True
-
- finally:
- # remove tmpfiles
- shutil.rmtree(export_dir)
- shutil.rmtree(extract_dir)
-
- def _execute(self, *args, **kwargs):
- """Execute command in image, modifying image."""
- return self._instance._execute(*args, **kwargs)
-
- def push_file(self, local_path, remote_path):
- """Copy file at 'local_path' to instance at 'remote_path'."""
- return self._instance.push_file(local_path, remote_path)
-
- def run_script(self, *args, **kwargs):
- """Run script in image, modifying image.
-
- @return_value: script output
- """
- return self._instance.run_script(*args, **kwargs)
-
- def snapshot(self):
- """Create snapshot of image, block until done."""
- # get empty user data to pass in to instance
- # if overrides for user data provided, use them
- empty_userdata = util.update_user_data(
- {}, self.config.get('user_data_overrides', {}))
- conf = {'user.user-data': empty_userdata}
- # clone current instance
- instance = self.platform.launch_container(
- self.properties, self.config, self.features,
- container=self._instance.name, image_desc=str(self),
- use_desc='snapshot', container_config=conf)
- # wait for cloud-init before boot_clean_script is run to ensure
- # /var/lib/cloud is removed cleanly
- instance.start(wait=True, wait_for_cloud_init=True)
- if self.config.get('boot_clean_script'):
- instance.run_script(self.config.get('boot_clean_script'))
- # freeze current instance and return snapshot
- instance.freeze()
- return LXDSnapshot(self.platform, self.properties, self.config,
- self.features, instance)
-
- def destroy(self):
- """Clean up data associated with image."""
- self.pylxd_image = None
- super(LXDImage, self).destroy()
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/lxd/instance.py b/tests/cloud_tests/platforms/lxd/instance.py
deleted file mode 100644
index 2b973a08..00000000
--- a/tests/cloud_tests/platforms/lxd/instance.py
+++ /dev/null
@@ -1,278 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base LXD instance."""
-
-import os
-import shutil
-import time
-from tempfile import mkdtemp
-
-from cloudinit.subp import subp, ProcessExecutionError, which
-from cloudinit.util import load_yaml
-from tests.cloud_tests import LOG
-from tests.cloud_tests.util import PlatformError
-
-from ..instances import Instance
-
-from pylxd import exceptions as pylxd_exc
-
-
-class LXDInstance(Instance):
- """LXD container backed instance."""
-
- platform_name = "lxd"
- _console_log_method = None
- _console_log_file = None
-
- def __init__(self, platform, name, properties, config, features,
- pylxd_container):
- """Set up instance.
-
- @param platform: platform object
- @param name: hostname of instance
- @param properties: image properties
- @param config: image config
- @param features: supported feature flags
- """
- if not pylxd_container:
- raise ValueError("Invalid value pylxd_container: %s" %
- pylxd_container)
- self._pylxd_container = pylxd_container
- super(LXDInstance, self).__init__(
- platform, name, properties, config, features)
- self.tmpd = mkdtemp(prefix="%s-%s" % (type(self).__name__, name))
- self.name = name
- self._setup_console_log()
-
- @property
- def pylxd_container(self):
- """Property function."""
- if self._pylxd_container is None:
- raise RuntimeError(
- "%s: Attempted use of pylxd_container after deletion." % self)
- self._pylxd_container.sync()
- return self._pylxd_container
-
- def __str__(self):
- return (
- '%s(name=%s) status=%s' %
- (self.__class__.__name__, self.name,
- ("deleted" if self._pylxd_container is None else
- self.pylxd_container.status)))
-
- def _execute(self, command, stdin=None, env=None):
- if env is None:
- env = {}
-
- env_args = []
- if env:
-            env_args = ['env'] + ["%s=%s" % (k, v) for k, v in env.items()]
-
- # ensure instance is running and execute the command
- self.start()
-
- # Use cmdline client due to https://github.com/lxc/pylxd/issues/268
- exit_code = 0
- try:
- stdout, stderr = subp(
- ['lxc', 'exec', self.name, '--'] + env_args + list(command),
- data=stdin, decode=False)
- except ProcessExecutionError as e:
- exit_code = e.exit_code
- stdout = e.stdout
- stderr = e.stderr
-
- return stdout, stderr, exit_code
-
- def read_data(self, remote_path, decode=False):
- """Read data from instance filesystem.
-
- @param remote_path: path in instance
- @param decode: decode data before returning.
- @return_value: content of remote_path as bytes if 'decode' is False,
- and as string if 'decode' is True.
- """
- data = self.pylxd_container.files.get(remote_path)
- return data.decode() if decode else data
-
- def write_data(self, remote_path, data):
- """Write data to instance filesystem.
-
- @param remote_path: path in instance
- @param data: data to write in bytes
- """
- self.pylxd_container.files.put(remote_path, data)
-
- @property
- def console_log_method(self):
- if self._console_log_method is not None:
- return self._console_log_method
-
- client = which('lxc')
- if not client:
- raise PlatformError("No 'lxc' client.")
-
- elif _has_proper_console_support():
- self._console_log_method = 'show-log'
- elif client.startswith("/snap"):
- self._console_log_method = 'logfile-snap'
- else:
- self._console_log_method = 'logfile-tmp'
-
- LOG.debug("Set console log method to %s", self._console_log_method)
- return self._console_log_method
-
- def _setup_console_log(self):
- method = self.console_log_method
- if not method.startswith("logfile-"):
- return
-
- if method == "logfile-snap":
- log_dir = "/var/snap/lxd/common/consoles"
- if not os.path.exists(log_dir):
- raise PlatformError(
- "Unable to log with snap lxc. Please run:\n"
- " sudo mkdir --mode=1777 -p %s" % log_dir)
- elif method == "logfile-tmp":
- log_dir = "/tmp"
- else:
- raise PlatformError(
- "Unexpected value for console method: %s" % method)
-
- # doing this ensures we can read it. Otherwise it ends up root:root.
- log_file = os.path.join(log_dir, self.name)
- with open(log_file, "w") as fp:
- fp.write("# %s\n" % self.name)
-
- cfg = "lxc.console.logfile=%s" % log_file
- orig = self._pylxd_container.config.get('raw.lxc', "")
- if orig:
- orig += "\n"
- self._pylxd_container.config['raw.lxc'] = orig + cfg
- self._pylxd_container.save()
- self._console_log_file = log_file
-
- def console_log(self):
- """Console log.
-
- @return_value: bytes of this instance's console
- """
-
- if self._console_log_file:
- if not os.path.exists(self._console_log_file):
-                raise NotImplementedError(
-                    "Console log '%s' does not exist. If this is a remote "
-                    "lxc, then this is really NotImplementedError. If it is "
-                    "a local lxc, then this is a RuntimeError. See "
-                    "https://github.com/lxc/lxd/issues/1129"
-                    % self._console_log_file)
- with open(self._console_log_file, "rb") as fp:
- return fp.read()
-
- try:
- return subp(['lxc', 'console', '--show-log', self.name],
- decode=False)[0]
- except ProcessExecutionError as e:
- raise PlatformError(
- "console log",
- "Console log failed [%d]: stdout=%s stderr=%s" % (
- e.exit_code, e.stdout, e.stderr)
- ) from e
-
- def reboot(self, wait=True):
- """Reboot instance."""
- self.shutdown(wait=wait)
- self.start(wait=wait)
-
- def shutdown(self, wait=True, retry=1):
- """Shutdown instance."""
- if self.pylxd_container.status == 'Stopped':
- return
-
- try:
- LOG.debug("%s: shutting down (wait=%s)", self, wait)
- self.pylxd_container.stop(wait=wait)
- except (pylxd_exc.LXDAPIException, pylxd_exc.NotFound) as e:
- # An exception happens here sometimes (LP: #1783198)
- # LOG it, and try again.
- LOG.warning(
- ("%s: shutdown(retry=%d) caught %s in shutdown "
- "(response=%s): %s"),
- self, retry, e.__class__.__name__, e.response, e)
- if isinstance(e, pylxd_exc.NotFound):
- LOG.debug("container_exists(%s) == %s",
- self.name, self.platform.container_exists(self.name))
- if retry == 0:
- raise e
- return self.shutdown(wait=wait, retry=retry - 1)
-
- def start(self, wait=True, wait_for_cloud_init=False):
- """Start instance."""
- if self.pylxd_container.status != 'Running':
- self.pylxd_container.start(wait=wait)
- if wait:
- self._wait_for_system(wait_for_cloud_init)
-
- def freeze(self):
- """Freeze instance."""
- if self.pylxd_container.status != 'Frozen':
- self.pylxd_container.freeze(wait=True)
-
- def unfreeze(self):
- """Unfreeze instance."""
- if self.pylxd_container.status == 'Frozen':
- self.pylxd_container.unfreeze(wait=True)
-
- def destroy(self):
- """Clean up instance."""
- LOG.debug("%s: deleting container.", self)
- self.unfreeze()
- self.shutdown()
- retries = [1] * 5
- for attempt, wait in enumerate(retries):
- try:
- self.pylxd_container.delete(wait=True)
- break
- except Exception:
- if attempt + 1 >= len(retries):
- raise
- LOG.debug('Failed to delete container %s (%s/%s) retrying...',
- self, attempt + 1, len(retries))
- time.sleep(wait)
-
- self._pylxd_container = None
-
- if self.platform.container_exists(self.name):
- raise OSError('%s: container was not properly removed' % self)
- if self._console_log_file and os.path.exists(self._console_log_file):
- os.unlink(self._console_log_file)
- shutil.rmtree(self.tmpd)
- super(LXDInstance, self).destroy()
-
-
-def _has_proper_console_support():
- stdout, _ = subp(['lxc', 'info'])
- info = load_yaml(stdout)
- reason = None
- if 'console' not in info.get('api_extensions', []):
- reason = "LXD server does not support console api extension"
- else:
- dver = str(info.get('environment', {}).get('driver_version', ""))
- if dver.startswith("2.") or dver.startswith("1."):
- reason = "LXD Driver version not 3.x+ (%s)" % dver
- else:
- try:
- stdout = subp(['lxc', 'console', '--help'], decode=False)[0]
- if not (b'console' in stdout and b'log' in stdout):
- reason = "no '--log' in lxc console --help"
- except ProcessExecutionError:
- reason = "no 'console' command in lxc client"
-
- if reason:
- LOG.debug("no console-support: %s", reason)
- return False
- else:
- LOG.debug("console-support looks good")
- return True
-
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/lxd/platform.py b/tests/cloud_tests/platforms/lxd/platform.py
deleted file mode 100644
index f7251a07..00000000
--- a/tests/cloud_tests/platforms/lxd/platform.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base LXD platform."""
-
-from pylxd import (Client, exceptions)
-
-from ..platforms import Platform
-from .image import LXDImage
-from .instance import LXDInstance
-from tests.cloud_tests import util
-
-DEFAULT_SSTREAMS_SERVER = "https://images.linuxcontainers.org:8443"
-
-
-class LXDPlatform(Platform):
- """LXD test platform."""
-
- platform_name = 'lxd'
-
- def __init__(self, config):
- """Set up platform."""
- super(LXDPlatform, self).__init__(config)
- # TODO: allow configuration of remote lxd host via env variables
- # set up lxd connection
- self.client = Client()
-
- def get_image(self, img_conf):
- """Get image using specified image configuration.
-
- @param img_conf: configuration for image
- @return_value: cloud_tests.images instance
- """
- pylxd_image = self.client.images.create_from_simplestreams(
- img_conf.get('sstreams_server', DEFAULT_SSTREAMS_SERVER),
- img_conf['alias'])
- image = LXDImage(self, img_conf, pylxd_image)
- if img_conf.get('override_templates', False):
- image.update_templates(self.config.get('template_overrides', {}),
- self.config.get('template_files', {}))
- return image
-
- def launch_container(self, properties, config, features,
- image=None, container=None, ephemeral=False,
- container_config=None, block=True, image_desc=None,
- use_desc=None):
- """Launch a container.
-
- @param properties: image properties
- @param config: image configuration
- @param features: image features
- @param image: image fingerprint to launch from
- @param container: container to copy
- @param ephemeral: delete image after first shutdown
- @param container_config: config options for instance as dict
- @param block: wait until container created
- @param image_desc: description of image being launched
- @param use_desc: description of container's use
- @return_value: cloud_tests.instances instance
- """
- if not (image or container):
- raise ValueError("either image or container must be specified")
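-        # Create the container either from an image fingerprint or as a copy
-        # of an existing container, depending on which argument was given.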
- container = self.client.containers.create({
- 'name': util.gen_instance_name(image_desc=image_desc,
- use_desc=use_desc,
- used_list=self.list_containers()),
- 'ephemeral': bool(ephemeral),
- 'config': (container_config
- if isinstance(container_config, dict) else {}),
- 'source': ({'type': 'image', 'fingerprint': image} if image else
- {'type': 'copy', 'source': container})
- }, wait=block)
- return LXDInstance(self, container.name, properties, config, features,
- container)
-
- def container_exists(self, container_name):
- """Check if container with name 'container_name' exists.
-
- @return_value: True if exists else False
- """
- res = True
- try:
- self.client.containers.get(container_name)
- except exceptions.LXDAPIException as e:
- res = False
- if e.response.status_code != 404:
- raise
- return res
-
- def list_containers(self):
- """List names of all containers.
-
- @return_value: list of names
- """
- return [container.name for container in self.client.containers.all()]
-
- def query_image_by_alias(self, alias):
- """Get image by alias in local image store.
-
- @param alias: alias of image
- @return_value: pylxd image (not cloud_tests.images instance)
- """
- return self.client.images.get_by_alias(alias)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/lxd/snapshot.py b/tests/cloud_tests/platforms/lxd/snapshot.py
deleted file mode 100644
index b524644f..00000000
--- a/tests/cloud_tests/platforms/lxd/snapshot.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base LXD snapshot."""
-
-from ..snapshots import Snapshot
-
-
-class LXDSnapshot(Snapshot):
- """LXD image copy backed snapshot."""
-
- platform_name = "lxd"
-
- def __init__(self, platform, properties, config, features,
- pylxd_frozen_instance):
- """Set up snapshot.
-
- @param platform: platform object
- @param properties: image properties
- @param config: image config
- @param features: supported feature flags
- """
- self.pylxd_frozen_instance = pylxd_frozen_instance
- super(LXDSnapshot, self).__init__(
- platform, properties, config, features)
-
- def launch(self, user_data, meta_data=None, block=True, start=True,
- use_desc=None):
- """Launch instance.
-
- @param user_data: user-data for the instance
-        @param meta_data: meta-data for the instance (e.g. instance-id)
- @param block: wait until instance is created
- @param start: start instance and wait until fully started
- @param use_desc: description of snapshot instance use
- @return_value: an Instance
- """
- inst_config = {'user.user-data': user_data}
- if meta_data:
- inst_config['user.meta-data'] = meta_data
- instance = self.platform.launch_container(
- self.properties, self.config, self.features, block=block,
- image_desc=str(self), container=self.pylxd_frozen_instance.name,
- use_desc=use_desc, container_config=inst_config)
- if start:
- instance.start()
- return instance
-
- def destroy(self):
- """Clean up snapshot data."""
- self.pylxd_frozen_instance.destroy()
- super(LXDSnapshot, self).destroy()
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/nocloudkvm/image.py b/tests/cloud_tests/platforms/nocloudkvm/image.py
deleted file mode 100644
index ff5b6ad7..00000000
--- a/tests/cloud_tests/platforms/nocloudkvm/image.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""NoCloud KVM Image Base Class."""
-
-from cloudinit import subp
-
-import os
-import shutil
-import tempfile
-
-from ..images import Image
-from .snapshot import NoCloudKVMSnapshot
-
-
-class NoCloudKVMImage(Image):
- """NoCloud KVM backed image."""
-
- platform_name = "nocloud-kvm"
-
- def __init__(self, platform, config, orig_img_path):
- """Set up image.
-
- @param platform: platform object
- @param config: image configuration
-        @param orig_img_path: path to the original image
- """
- self.modified = False
- self._workd = tempfile.mkdtemp(prefix='NoCloudKVMImage')
- self._orig_img_path = orig_img_path
- self._img_path = os.path.join(self._workd,
- os.path.basename(self._orig_img_path))
-
- subp.subp(['qemu-img', 'create', '-f', 'qcow2',
- '-b', orig_img_path, self._img_path])
-
- super(NoCloudKVMImage, self).__init__(platform, config)
-
- def _execute(self, command, stdin=None, env=None):
- """Execute command in image, modifying image."""
- return self.mount_image_callback(command, stdin=stdin, env=env)
-
- def mount_image_callback(self, command, stdin=None, env=None):
- """Run mount-image-callback."""
-
- env_args = []
- if env:
-            env_args = ['env'] + ["%s=%s" % (k, v) for k, v in env.items()]
-
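-        # mount-image-callback mounts the disk image and runs the given
-        # command chrooted inside it, so any changes persist in the image.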
- mic_chroot = ['sudo', 'mount-image-callback', '--system-mounts',
- '--system-resolvconf', self._img_path,
- '--', 'chroot', '_MOUNTPOINT_']
- try:
- out, err = subp.subp(mic_chroot + env_args + list(command),
- data=stdin, decode=False)
- return (out, err, 0)
- except subp.ProcessExecutionError as e:
- return (e.stdout, e.stderr, e.exit_code)
-
- def snapshot(self):
- """Create snapshot of image, block until done."""
- if not self._img_path:
-            raise RuntimeError('image has already been destroyed')
-
- return NoCloudKVMSnapshot(self.platform, self.properties, self.config,
- self.features, self._img_path)
-
- def destroy(self):
- """Unset path to signal image is no longer used.
-
- The removal of the images and all other items is handled by the
- framework. In some cases we want to keep the images, so let the
- framework decide whether to keep or destroy everything.
- """
- self._img_path = None
- shutil.rmtree(self._workd)
-
- super(NoCloudKVMImage, self).destroy()
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/nocloudkvm/instance.py b/tests/cloud_tests/platforms/nocloudkvm/instance.py
deleted file mode 100644
index 5140a11c..00000000
--- a/tests/cloud_tests/platforms/nocloudkvm/instance.py
+++ /dev/null
@@ -1,197 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base NoCloud KVM instance."""
-
-import copy
-import os
-import socket
-import subprocess
-import time
-import uuid
-
-from ..instances import Instance
-from cloudinit.atomic_helper import write_json
-from cloudinit import subp
-from tests.cloud_tests import LOG, util
-
-# This domain contains reverse lookups for hostnames that are used.
-# The primary reason is so sudo will return quickly when it attempts
-# to look up the hostname. i9n is just short for 'integration'.
-# see also bug 1730744 for why we had to do this.
-CI_DOMAIN = "i9n.cloud-init.io"
-
-
-class NoCloudKVMInstance(Instance):
- """NoCloud KVM backed instance."""
-
- platform_name = "nocloud-kvm"
-
- def __init__(self, platform, name, image_path, properties, config,
- features, user_data, meta_data):
- """Set up instance.
-
- @param platform: platform object
-        @param name: name of the instance
- @param image_path: path to disk image to boot.
- @param properties: dictionary of properties
- @param config: dictionary of configuration values
- @param features: dictionary of supported feature flags
- """
- super(NoCloudKVMInstance, self).__init__(
- platform, name, properties, config, features
- )
-
- self.user_data = user_data
- if meta_data:
- meta_data = copy.deepcopy(meta_data)
- else:
- meta_data = {}
-
- if 'instance-id' in meta_data:
- iid = meta_data['instance-id']
- else:
- iid = str(uuid.uuid1())
- meta_data['instance-id'] = iid
-
- self.instance_id = iid
- self.ssh_key_file = os.path.join(
- platform.config['data_dir'], platform.config['private_key'])
- self.ssh_pubkey_file = os.path.join(
- platform.config['data_dir'], platform.config['public_key'])
-
- self.ssh_pubkey = None
- if self.ssh_pubkey_file:
- with open(self.ssh_pubkey_file, "r") as fp:
- self.ssh_pubkey = fp.read().rstrip('\n')
-
- if not meta_data.get('public-keys'):
- meta_data['public-keys'] = []
- meta_data['public-keys'].append(self.ssh_pubkey)
-
- self.ssh_ip = '127.0.0.1'
- self.ssh_port = None
- self.pid = None
- self.pid_file = None
- self.console_file = None
- self.disk = image_path
- self.cache_mode = platform.config.get('cache_mode',
- 'cache=none,aio=native')
- self.meta_data = meta_data
-
- def shutdown(self, wait=True):
- """Shutdown instance."""
-
- if self.pid:
- # This relies on _execute which uses sudo over ssh. The ssh
- # connection would get killed before sudo exited, so ignore errors.
- cmd = ['shutdown', 'now']
- try:
- self._execute(cmd)
- except util.InTargetExecuteError:
- pass
- self._ssh_close()
-
- if wait:
- LOG.debug("Executed shutdown. waiting on pid %s to end",
- self.pid)
- time_for_shutdown = 120
- give_up_at = time.time() + time_for_shutdown
- pid_file_path = '/proc/%s' % self.pid
- msg = ("pid %s did not exit in %s seconds after shutdown." %
- (self.pid, time_for_shutdown))
-                while True:
-                    if not os.path.exists(pid_file_path):
-                        break
-                    if time.time() > give_up_at:
-                        raise util.PlatformError("shutdown", msg)
-                    time.sleep(1)
- self.pid = None
-
- def destroy(self):
- """Clean up instance."""
- if self.pid:
- try:
- subp.subp(['kill', '-9', self.pid])
- except subp.ProcessExecutionError:
- pass
-
- if self.pid_file:
- try:
- os.remove(self.pid_file)
- except Exception:
- pass
-
- self.pid = None
- self._ssh_close()
-
- super(NoCloudKVMInstance, self).destroy()
-
- def _execute(self, command, stdin=None, env=None):
- env_args = []
- if env:
-            env_args = ['env'] + ["%s=%s" % (k, v) for k, v in env.items()]
-
- return self._ssh(['sudo'] + env_args + list(command), stdin=stdin)
-
- def generate_seed(self, tmpdir):
- """Generate nocloud seed from user-data"""
- seed_file = os.path.join(tmpdir, '%s_seed.img' % self.name)
- user_data_file = os.path.join(tmpdir, '%s_user_data' % self.name)
- meta_data_file = os.path.join(tmpdir, '%s_meta_data' % self.name)
-
- with open(user_data_file, "w") as ud_file:
- ud_file.write(self.user_data)
-
- # meta-data can be yaml, but more easily pretty printed with json
- write_json(meta_data_file, self.meta_data)
- subp.subp(['cloud-localds', seed_file, user_data_file,
- meta_data_file])
-
- return seed_file
-
- def get_free_port(self):
- """Get a free port assigned by the kernel."""
- s = socket.socket()
- s.bind(('', 0))
- num = s.getsockname()[1]
- s.close()
- return num
-
- def start(self, wait=True, wait_for_cloud_init=False):
- """Start instance."""
- tmpdir = self.platform.config['data_dir']
- seed = self.generate_seed(tmpdir)
- self.pid_file = os.path.join(tmpdir, '%s.pid' % self.name)
- self.console_file = os.path.join(tmpdir, '%s-console.log' % self.name)
- self.ssh_port = self.get_free_port()
-
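-        # Boot the image with the xkvm helper: the seed disk provides the
-        # NoCloud datasource, and user-mode networking forwards a free local
-        # port to the guest's SSH port (22).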
- cmd = ['./tools/xkvm',
- '--disk', '%s,%s' % (self.disk, self.cache_mode),
- '--disk', '%s' % seed,
- '--netdev', ','.join(['user',
- 'hostfwd=tcp::%s-:22' % self.ssh_port,
- 'dnssearch=%s' % CI_DOMAIN]),
- '--', '-pidfile', self.pid_file, '-vnc', 'none',
- '-m', '2G', '-smp', '2', '-nographic', '-name', self.name,
- '-serial', 'file:' + self.console_file]
- subprocess.Popen(cmd,
- close_fds=True,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
-
- while not os.path.exists(self.pid_file):
- time.sleep(1)
-
- with open(self.pid_file, 'r') as pid_f:
- self.pid = pid_f.readlines()[0].strip()
-
- if wait:
- self._wait_for_system(wait_for_cloud_init)
-
- def console_log(self):
- if not self.console_file:
- return b''
- with open(self.console_file, "rb") as fp:
- return fp.read()
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/nocloudkvm/platform.py b/tests/cloud_tests/platforms/nocloudkvm/platform.py
deleted file mode 100644
index 53c8ebf2..00000000
--- a/tests/cloud_tests/platforms/nocloudkvm/platform.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base NoCloud KVM platform."""
-import glob
-import os
-
-from simplestreams import filters
-from simplestreams import mirrors
-from simplestreams import objectstores
-from simplestreams import util as s_util
-
-from ..platforms import Platform
-from .image import NoCloudKVMImage
-from .instance import NoCloudKVMInstance
-from cloudinit import subp
-from cloudinit import util as c_util
-from tests.cloud_tests import util
-
-
-class NoCloudKVMPlatform(Platform):
- """NoCloud KVM test platform."""
-
- platform_name = 'nocloud-kvm'
-
- def get_image(self, img_conf):
- """Get image using specified image configuration.
-
- @param img_conf: configuration for image
- @return_value: cloud_tests.images instance
- """
- (url, path) = s_util.path_from_mirror_url(img_conf['mirror_url'], None)
-
-        img_filters = filters.get_filters(
- [
- 'arch=%s' % c_util.get_dpkg_architecture(),
- 'release=%s' % img_conf['release'],
- 'ftype=disk1.img',
- ]
- )
-        mirror_config = {'filters': img_filters,
- 'keep_items': False,
- 'max_items': 1,
- 'checksumming_reader': True,
- 'item_download': True
- }
-
- def policy(content, path):
- return s_util.read_signed(content, keyring=img_conf['keyring'])
-
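-        # sync the filtered image from the simplestreams mirror into the
-        # local mirror_dir, verifying signatures with the configured keyring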
- smirror = mirrors.UrlMirrorReader(url, policy=policy)
- tstore = objectstores.FileStore(img_conf['mirror_dir'])
- tmirror = mirrors.ObjectFilterMirror(config=mirror_config,
- objectstore=tstore)
- tmirror.sync(smirror, path)
-
- search_d = os.path.join(img_conf['mirror_dir'], '**',
- img_conf['release'], '**', '*.img')
-
- images = []
- for fname in glob.iglob(search_d, recursive=True):
- images.append(fname)
-
- if len(images) < 1:
- raise RuntimeError("No images found under '%s'" % search_d)
- if len(images) > 1:
- raise RuntimeError(
- "Multiple images found in '%s': %s" % (search_d,
- ' '.join(images)))
-
- image = NoCloudKVMImage(self, img_conf, images[0])
- return image
-
- def create_instance(self, properties, config, features,
- src_img_path, image_desc=None, use_desc=None,
- user_data=None, meta_data=None):
- """Create an instance
-
- @param src_img_path: image path to launch from
- @param properties: image properties
- @param config: image configuration
- @param features: image features
- @param image_desc: description of image being launched
- @param use_desc: description of container's use
- @return_value: cloud_tests.instances instance
- """
- name = util.gen_instance_name(image_desc=image_desc, use_desc=use_desc)
- img_path = os.path.join(self.config['data_dir'], name + '.qcow2')
- subp.subp(['qemu-img', 'create', '-f', 'qcow2',
- '-b', src_img_path, img_path])
-
- return NoCloudKVMInstance(self, name, img_path, properties, config,
- features, user_data, meta_data)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/nocloudkvm/snapshot.py b/tests/cloud_tests/platforms/nocloudkvm/snapshot.py
deleted file mode 100644
index 2dae3590..00000000
--- a/tests/cloud_tests/platforms/nocloudkvm/snapshot.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base NoCloud KVM snapshot."""
-import os
-import shutil
-import tempfile
-
-from ..snapshots import Snapshot
-
-
-class NoCloudKVMSnapshot(Snapshot):
- """NoCloud KVM image copy backed snapshot."""
-
- platform_name = "nocloud-kvm"
-
- def __init__(self, platform, properties, config, features, image_path):
- """Set up snapshot.
-
- @param platform: platform object
- @param properties: image properties
- @param config: image config
- @param features: supported feature flags
- @param image_path: image file to snapshot.
- """
- self._workd = tempfile.mkdtemp(prefix='NoCloudKVMSnapshot')
- snapshot = os.path.join(self._workd, 'snapshot')
- shutil.copyfile(image_path, snapshot)
- self._image_path = snapshot
-
- super(NoCloudKVMSnapshot, self).__init__(
- platform, properties, config, features)
-
- def launch(self, user_data, meta_data=None, block=True, start=True,
- use_desc=None):
- """Launch instance.
-
- @param user_data: user-data for the instance
-        @param meta_data: meta-data for the instance (e.g. instance-id)
- @param block: wait until instance is created
- @param start: start instance and wait until fully started
- @param use_desc: description of snapshot instance use
- @return_value: an Instance
- """
- instance = self.platform.create_instance(
- self.properties, self.config, self.features,
- self._image_path, image_desc=str(self), use_desc=use_desc,
- user_data=user_data, meta_data=meta_data)
-
- if start:
- instance.start()
-
- return instance
-
- def destroy(self):
- """Clean up snapshot data."""
- shutil.rmtree(self._workd)
- super(NoCloudKVMSnapshot, self).destroy()
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/platforms.py b/tests/cloud_tests/platforms/platforms.py
deleted file mode 100644
index ac3b6563..00000000
--- a/tests/cloud_tests/platforms/platforms.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base platform class."""
-import os
-import shutil
-
-from simplestreams import filters, mirrors
-from simplestreams import util as s_util
-
-from cloudinit import subp
-from cloudinit import util as c_util
-
-from tests.cloud_tests import util
-
-
-class Platform(object):
- """Base class for platforms."""
-
- platform_name = None
-
- def __init__(self, config):
- """Set up platform."""
- self.config = config
- self.tmpdir = util.mkdtemp()
- if 'data_dir' in config:
- self.data_dir = config['data_dir']
- else:
- self.data_dir = os.path.join(self.tmpdir, "data_dir")
- os.mkdir(self.data_dir)
-
- self._generate_ssh_keys(self.data_dir)
-
- def get_image(self, img_conf):
- """Get image using specified image configuration.
-
- @param img_conf: configuration for image
- @return_value: cloud_tests.images instance
- """
- raise NotImplementedError
-
- def destroy(self):
- """Clean up platform data."""
- shutil.rmtree(self.tmpdir)
-
- def _generate_ssh_keys(self, data_dir):
- """Generate SSH keys to be used with image."""
- filename = os.path.join(data_dir, self.config['private_key'])
-
- if os.path.exists(filename):
- c_util.del_file(filename)
-
- subp.subp(['ssh-keygen', '-m', 'PEM', '-t', 'rsa', '-b', '4096',
- '-f', filename, '-P', '',
- '-C', 'ubuntu@cloud_test'],
- capture=True)
-
- @staticmethod
- def _query_streams(img_conf, img_filter):
- """Query streams for latest image given a specific filter.
-
- @param img_conf: configuration for image
-        @param img_filter: list of filter strings in 'key=value' format
- @return: dictionary with latest image information or empty
- """
- def policy(content, path):
- return s_util.read_signed(content, keyring=img_conf['keyring'])
-
- (url, path) = s_util.path_from_mirror_url(img_conf['mirror_url'], None)
- smirror = mirrors.UrlMirrorReader(url, policy=policy)
-
- config = {'max_items': 1, 'filters': filters.get_filters(img_filter)}
- tmirror = FilterMirror(config)
- tmirror.sync(smirror, path)
-
- try:
- return tmirror.json_entries[0]
- except IndexError as e:
- raise RuntimeError(
- 'no images found with filter: %s' % img_filter
- ) from e
-
-
-class FilterMirror(mirrors.BasicMirrorWriter):
- """Taken from sstream-query to return query result as json array."""
-
- def __init__(self, config=None):
- super(FilterMirror, self).__init__(config=config)
- if config is None:
- config = {}
- self.config = config
- self.filters = config.get('filters', [])
- self.json_entries = []
-
- def load_products(self, path=None, content_id=None):
- return {'content_id': content_id, 'products': {}}
-
- def filter_item(self, data, src, target, pedigree):
- return filters.filter_item(self.filters, data, src, pedigree)
-
- def insert_item(self, data, src, target, pedigree, contentsource):
- # src and target are top level products:1.0
- # data is src['products'][ped[0]]['versions'][ped[1]]['items'][ped[2]]
- # contentsource is a ContentSource if 'path' exists in data or None
- data = s_util.products_exdata(src, pedigree)
- if 'path' in data:
- data.update({'item_url': contentsource.url})
- self.json_entries.append(data)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/snapshots.py b/tests/cloud_tests/platforms/snapshots.py
deleted file mode 100644
index 0f5f8bb6..00000000
--- a/tests/cloud_tests/platforms/snapshots.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base snapshot."""
-
-
-class Snapshot(object):
- """Base class for snapshots."""
-
- platform_name = None
-
- def __init__(self, platform, properties, config, features):
- """Set up snapshot.
-
- @param platform: platform object
- @param properties: image properties
- @param config: image config
- @param features: supported feature flags
- """
- self.platform = platform
- self.properties = properties
- self.config = config
- self.features = features
-
- def __str__(self):
- """A brief description of the snapshot."""
- return '-'.join((self.properties['os'], self.properties['release']))
-
- def launch(self, user_data, meta_data=None, block=True, start=True,
- use_desc=None):
- """Launch instance.
-
- @param user_data: user-data for the instance
-        @param meta_data: meta-data for the instance (e.g. instance-id)
- @param block: wait until instance is created
- @param start: start instance and wait until fully started
- @param use_desc: description of snapshot instance use
- @return_value: an Instance
- """
- raise NotImplementedError
-
- def destroy(self):
- """Clean up snapshot data."""
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/releases.yaml b/tests/cloud_tests/releases.yaml
deleted file mode 100644
index 6249efc5..00000000
--- a/tests/cloud_tests/releases.yaml
+++ /dev/null
@@ -1,364 +0,0 @@
-# ============================= Release Config ================================
-default_release_config:
- # global default configuration options
- default:
- # all are disabled by default
- enabled: false
- # timeout for booting image and running cloud init
- boot_timeout: 120
- # a script to run after a boot that is used to modify an image, before
- # making a snapshot of the image. may be useful for removing data left
- # behind from cloud-init booting, such as logs, to ensure that data
- # from snapshot.launch() will not include a cloud-init.log from a boot
- # used to create the snapshot, if cloud-init has not run
- boot_clean_script: |
- #!/bin/bash
- rm -rf /var/log/cloud-init.log /var/log/cloud-init-output.log \
- /var/lib/cloud/ /run/cloud-init/ /var/log/syslog
- # test script to determine if system is booted fully
- system_ready_script: |
- # permit running or degraded state as both indicate complete boot
- [ $(systemctl is-system-running) = 'running' -o
- $(systemctl is-system-running) = 'degraded' ]
- # test script to determine if cloud-init has finished
- cloud_init_ready_script: |
- [ -f '/run/cloud-init/result.json' ]
-        # feature groups and per-release feature flag overrides; all
-        # supported flags are defined in the top-level 'features' section
- feature_groups: []
- features: {}
- mirror_url: https://cloud-images.ubuntu.com/daily
- mirror_dir: '/srv/citest/images'
- keyring: /usr/share/keyrings/ubuntu-cloudimage-keyring.gpg
- # The OS version formatted as Major.Minor is used to compare releases.
- # Each release needs to define this, for example "16.04". Quoting is
- # necessary to ensure the version is treated as a string.
- version: null
-
- ec2:
- # Choose from: [ebs, instance-store]
- root-store: ebs
- boot_timeout: 300
- nocloud-kvm:
- setup_overrides: null
- override_templates: false
- # lxd specific default configuration options
- lxd:
- # default sstreams server to use for lxd image retrieval
- sstreams_server: https://us.images.linuxcontainers.org:8443
- # keep base image, avoids downloading again next run
- cache_base_image: true
- # lxd images from linuxcontainers.org do not have the nocloud seed
- # templates in place, so the image metadata must be modified
- override_templates: true
- # arg overrides to set image up
- setup_overrides:
- # lxd images from linuxcontainers.org do not come with
- # cloud-init, so must pull cloud-init in from repo using
- # setup_image.upgrade
- upgrade: true
- azurecloud:
- boot_timeout: 300
-
-features:
- # all currently supported feature flags
- all:
- - apt # image supports apt package manager
- - byobu # byobu is available in repositories
- - landscape # landscape-client available in repos
- - lxd # lxd is available in the image
- - ppa # image supports ppas
- - rpm # image supports rpms
- - snap # supports snapd
- # NOTE: the following feature flags are to work around bugs in the
- # images, and can be removed when no longer needed
- - hostname # setting system hostname works
- # NOTE: the following feature flags are to work around issues in the
- # testcases, and can be removed when no longer needed
- - apt_src_cont # default contents and format of sources.list matches
- # ubuntu sources.list
- - apt_hist_fmt # apt command history entries use full paths to apt
- # executable rather than relative paths
- - daylight_time # timezones are daylight not standard time
- - apt_up_out # 'Calculating upgrade..' present in log output from
- # apt-get dist-upgrade output
- - engb_locale # locale en_GB.UTF-8 is available
- - locale_gen # the /etc/locale.gen file exists
- - no_ntpdate # 'ntpdate' is not installed by default
- - no_file_fmt_e # the 'file' utility does not have a formatting error
- - ppa_file_name # the name of the source file added to sources.list.d has
- # the expected format for newer ubuntu releases
- - sshd # requires ssh server to be installed by default
- - ssh_key_fmt # ssh auth keys printed to console have expected format
- - syslog # test case requires syslog to be written by default
- - ubuntu_ntp # expect ubuntu.pool.ntp.org to be used as ntp server
-        - ubuntu_repos  # test case requires ubuntu repositories to be used
- - ubuntu_user # test case needs user with the name 'ubuntu' to exist
- # NOTE: the following feature flags are to work around issues that may
- # be considered bugs in cloud-init
- - lsb_release # image has lsb_release installed, maybe should install
- # if missing by default
- - sudo # image has sudo installed, should not be required
- # feature flag groups
- groups:
- base:
- hostname: true
- no_file_fmt_e: true
- ubuntu_specific:
- apt_src_cont: true
- apt_hist_fmt: true
- byobu: true
- daylight_time: true
- engb_locale: true
- landscape: true
- locale_gen: true
- lsb_release: true
- lxd: true
- ppa: true
- ppa_file_name: true
- snap: true
- sshd: true
- ssh_key_fmt: true
- sudo: true
- syslog: true
- ubuntu_ntp: true
- ubuntu_repos: true
- ubuntu_user: true
- debian_base:
- apt: true
- apt_up_out: true
- no_ntpdate: true
- rhel_base:
- rpm: true
-
-releases:
- # UBUNTU =================================================================
- hirsute:
- # EOL: Jan 2022
- default:
- enabled: true
- release: hirsute
- version: "21.04"
- os: ubuntu
- feature_groups:
- - base
- - debian_base
- - ubuntu_specific
- lxd:
- sstreams_server: https://cloud-images.ubuntu.com/daily
- alias: hirsute
- setup_overrides: null
- override_templates: false
- groovy:
- # EOL: Jul 2021
- default:
- enabled: true
- release: groovy
- version: "20.10"
- os: ubuntu
- feature_groups:
- - base
- - debian_base
- - ubuntu_specific
- lxd:
- sstreams_server: https://cloud-images.ubuntu.com/daily
- alias: groovy
- setup_overrides: null
- override_templates: false
- focal:
- # EOL: Apr 2025
- default:
- enabled: true
- release: focal
- version: "20.04"
- os: ubuntu
- feature_groups:
- - base
- - debian_base
- - ubuntu_specific
- lxd:
- sstreams_server: https://cloud-images.ubuntu.com/daily
- alias: focal
- setup_overrides: null
- override_templates: false
- eoan:
- # EOL: Jul 2020
- default:
- enabled: true
- release: eoan
- version: "19.10"
- os: ubuntu
- feature_groups:
- - base
- - debian_base
- - ubuntu_specific
- lxd:
- sstreams_server: https://cloud-images.ubuntu.com/daily
- alias: eoan
- setup_overrides: null
- override_templates: false
- disco:
- # EOL: Jan 2020
- default:
- enabled: true
- release: disco
- version: "19.04"
- os: ubuntu
- feature_groups:
- - base
- - debian_base
- - ubuntu_specific
- lxd:
- sstreams_server: https://cloud-images.ubuntu.com/daily
- alias: disco
- setup_overrides: null
- override_templates: false
- cosmic:
- # EOL: Jul 2019
- default:
- enabled: true
- release: cosmic
- version: "18.10"
- os: ubuntu
- feature_groups:
- - base
- - debian_base
- - ubuntu_specific
- lxd:
- sstreams_server: https://cloud-images.ubuntu.com/daily
- alias: cosmic
- setup_overrides: null
- override_templates: false
- bionic:
- # EOL: Apr 2023
- default:
- enabled: true
- release: bionic
- version: "18.04"
- os: ubuntu
- feature_groups:
- - base
- - debian_base
- - ubuntu_specific
- lxd:
- sstreams_server: https://cloud-images.ubuntu.com/daily
- alias: bionic
- setup_overrides: null
- override_templates: false
- artful:
- # EOL: Jul 2018
- default:
- enabled: true
- release: artful
- version: "17.10"
- os: ubuntu
- feature_groups:
- - base
- - debian_base
- - ubuntu_specific
- lxd:
- sstreams_server: https://cloud-images.ubuntu.com/daily
- alias: artful
- setup_overrides: null
- override_templates: false
- xenial:
- # EOL: Apr 2021
- default:
- enabled: true
- release: xenial
- version: "16.04"
- os: ubuntu
- feature_groups:
- - base
- - debian_base
- - ubuntu_specific
- lxd:
- sstreams_server: https://cloud-images.ubuntu.com/daily
- alias: xenial
- setup_overrides: null
- override_templates: false
- trusty:
- # EOL: Apr 2019
- default:
- enabled: true
- release: trusty
- version: "14.04"
- os: ubuntu
- feature_groups:
- - base
- - debian_base
- - ubuntu_specific
- features:
- apt_up_out: false
- locale_gen: false
- lxd: false
- ppa_file_name: false
- snap: false
- ssh_key_fmt: false
- no_ntpdate: false
- no_file_fmt_e: false
- system_ready_script: |
- #!/bin/bash
- # upstart based, so use old style runlevels
- [ $(runlevel | awk '{print $2}') = '2' ]
- lxd:
- sstreams_server: https://cloud-images.ubuntu.com/daily
- alias: trusty
- setup_overrides: null
- override_templates: false
- # DEBIAN =================================================================
- stretch:
-        # EOL: Jun 2022
- default:
- enabled: true
- feature_groups:
- - base
- - debian_base
- lxd:
- alias: debian/stretch/default
- jessie:
- # EOL: Jun 2020
- # NOTE: the cloud-init version shipped with jessie is out of date
- # tests work if an up to date deb is used
- default:
- enabled: true
- feature_groups:
- - base
- - debian_base
- lxd:
- alias: debian/jessie/default
- # CENTOS =================================================================
- centos70:
- # EOL: Jun 2024 (2020 - end of full updates)
- default:
- enabled: true
- feature_groups:
- - base
- - rhel_base
- user_data_overrides:
- preserve_hostname: true
- lxd:
- features:
- # NOTE: (LP: #1575779)
- hostname: false
- alias: centos/7/default
- centos66:
- # EOL: Nov 2020
- default:
- enabled: true
- feature_groups:
- - base
- - rhel_base
-            # still supported, but only bugfixes after May 2017
- system_ready_script: |
- #!/bin/bash
- [ $(runlevel | awk '{print $2}') = '3' ]
- user_data_overrides:
- preserve_hostname: true
- lxd:
- features:
- # NOTE: (LP: #1575779)
- hostname: false
- alias: centos/6/default
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/run_funcs.py b/tests/cloud_tests/run_funcs.py
deleted file mode 100644
index 8ae91120..00000000
--- a/tests/cloud_tests/run_funcs.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Run functions."""
-
-import os
-
-from tests.cloud_tests import bddeb, collect, util, verify
-
-
-def tree_collect(args):
- """Collect data using deb build from current tree.
-
- @param args: cmdline args
- @return_value: fail count
- """
- failed = 0
- tmpdir = util.TempDir(tmpdir=args.data_dir, preserve=args.preserve_data)
-
- with tmpdir as data_dir:
- args.data_dir = data_dir
- args.deb = os.path.join(tmpdir.tmpdir, 'cloud-init_all.deb')
- try:
- failed += bddeb.bddeb(args)
- failed += collect.collect(args)
- except Exception:
- failed += 1
- raise
-
- return failed
-
-
-def tree_run(args):
- """Run test suite using deb build from current tree.
-
- @param args: cmdline args
- @return_value: fail count
- """
- failed = 0
- tmpdir = util.TempDir(tmpdir=args.data_dir, preserve=args.preserve_data)
-
- with tmpdir as data_dir:
- args.data_dir = data_dir
- args.deb = os.path.join(tmpdir.tmpdir, 'cloud-init_all.deb')
- try:
- failed += bddeb.bddeb(args)
- failed += collect.collect(args)
- failed += verify.verify(args)
- except Exception:
- failed += 1
- raise
-
- return failed
-
-
-def run(args):
- """Run test suite.
-
- @param args: cmdline args
- @return_value: fail count
- """
- failed = 0
- tmpdir = util.TempDir(tmpdir=args.data_dir, preserve=args.preserve_data)
-
- with tmpdir as data_dir:
- args.data_dir = data_dir
- try:
- failed += collect.collect(args)
- failed += verify.verify(args)
- except Exception:
- failed += 1
- raise
-
- return failed
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/setup_image.py b/tests/cloud_tests/setup_image.py
deleted file mode 100644
index 69e66e3f..00000000
--- a/tests/cloud_tests/setup_image.py
+++ /dev/null
@@ -1,237 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Setup image for testing."""
-
-from functools import partial
-import os
-import yaml
-
-from tests.cloud_tests import LOG
-from tests.cloud_tests import stage, util
-
-
-def installed_package_version(image, package, ensure_installed=True):
- """Get installed version of package.
-
- @param image: cloud_tests.images instance to operate on
- @param package: name of package
- @param ensure_installed: raise error if not installed
- @return_value: cloud-init version string
- """
- os_family = util.get_os_family(image.properties['os'])
- if os_family == 'debian':
- cmd = ['dpkg-query', '-W', "--showformat=${Version}", package]
- elif os_family == 'redhat':
-        cmd = ['rpm', '-q', '--queryformat', '%{VERSION}', package]
- else:
- raise NotImplementedError
-
- return image.execute(
- cmd, description='query version for package: {}'.format(package),
- rcs=(0,) if ensure_installed else range(0, 256))[0].strip()
-
-
-def install_deb(args, image):
- """Install deb into image.
-
- @param args: cmdline arguments, must contain --deb
- @param image: cloud_tests.images instance to operate on
- @return_value: None, may raise errors
- """
- # ensure system is compatible with package format
- os_family = util.get_os_family(image.properties['os'])
- if os_family != 'debian':
- raise NotImplementedError('install deb: {} not supported on os '
- 'family: {}'.format(args.deb, os_family))
-
- # install deb
- msg = 'install deb: "{}" into target'.format(args.deb)
- LOG.debug(msg)
- remote_path = os.path.join('/tmp', os.path.basename(args.deb))
- image.push_file(args.deb, remote_path)
- image.execute(
- ['apt-get', 'install', '--allow-downgrades', '--assume-yes',
- remote_path], description=msg)
- # check installed deb version matches package
- fmt = ['-W', "--showformat=${Version}"]
- out = image.execute(['dpkg-deb'] + fmt + [remote_path])[0]
- expected_version = out.strip()
- found_version = installed_package_version(image, 'cloud-init')
- if expected_version != found_version:
- raise OSError('install deb version "{}" does not match expected "{}"'
- .format(found_version, expected_version))
-
- LOG.debug('successfully installed: %s, version: %s', args.deb,
- found_version)
-
-
-def install_rpm(args, image):
- """Install rpm into image.
-
- @param args: cmdline arguments, must contain --rpm
- @param image: cloud_tests.images instance to operate on
- @return_value: None, may raise errors
- """
- os_family = util.get_os_family(image.properties['os'])
- if os_family != 'redhat':
- raise NotImplementedError('install rpm: {} not supported on os '
- 'family: {}'.format(args.rpm, os_family))
-
- # install rpm
- msg = 'install rpm: "{}" into target'.format(args.rpm)
- LOG.debug(msg)
- remote_path = os.path.join('/tmp', os.path.basename(args.rpm))
- image.push_file(args.rpm, remote_path)
- image.execute(['rpm', '-U', remote_path], description=msg)
-
-    fmt = ['--queryformat', '%{VERSION}']
-    (out, _err, _exit) = image.execute(['rpm', '-qp'] + fmt + [remote_path])
- expected_version = out.strip()
- found_version = installed_package_version(image, 'cloud-init')
- if expected_version != found_version:
- raise OSError('install rpm version "{}" does not match expected "{}"'
- .format(found_version, expected_version))
-
- LOG.debug('successfully installed: %s, version %s', args.rpm,
- found_version)
-
-
-def upgrade(args, image):
- """Upgrade or install cloud-init from repo.
-
- @param args: cmdline arguments
- @param image: cloud_tests.images instance to operate on
- @return_value: None, may raise errors
- """
- os_family = util.get_os_family(image.properties['os'])
- if os_family == 'debian':
- cmd = 'apt-get update && apt-get install cloud-init --yes'
- elif os_family == 'redhat':
- cmd = 'sleep 10 && yum install cloud-init --assumeyes'
- else:
- raise NotImplementedError
-
- msg = 'upgrading cloud-init'
- LOG.debug(msg)
- image.execute(cmd, description=msg)
-
-
-def upgrade_full(args, image):
- """Run the system's full upgrade command.
-
- @param args: cmdline arguments
- @param image: cloud_tests.images instance to operate on
- @return_value: None, may raise errors
- """
- os_family = util.get_os_family(image.properties['os'])
- if os_family == 'debian':
- cmd = 'apt-get update && apt-get upgrade --yes'
- elif os_family == 'redhat':
- cmd = 'yum upgrade --assumeyes'
- else:
- raise NotImplementedError('upgrade command not configured for distro '
- 'from family: {}'.format(os_family))
-
- msg = 'full system upgrade'
- LOG.debug(msg)
- image.execute(cmd, description=msg)
-
-
-def run_script(args, image):
- """Run a script in the target image.
-
- @param args: cmdline arguments, must contain --script
- @param image: cloud_tests.images instance to operate on
- @return_value: None, may raise errors
- """
- msg = 'run setup image script in target image'
- LOG.debug(msg)
- image.run_script(args.script, description=msg)
-
-
-def enable_ppa(args, image):
- """Enable a ppa in the target image.
-
- @param args: cmdline arguments, must contain --ppa
- @param image: cloud_tests.image instance to operate on
- @return_value: None, may raise errors
- """
- # ppa only supported on ubuntu (maybe debian?)
- if image.properties['os'].lower() != 'ubuntu':
- raise NotImplementedError('enabling a ppa is only available on ubuntu')
-
- # add ppa with add-apt-repository and update
- ppa = 'ppa:{}'.format(args.ppa)
- msg = 'enable ppa: "{}" in target'.format(ppa)
- LOG.debug(msg)
- cmd = 'add-apt-repository --yes {} && apt-get update'.format(ppa)
- image.execute(cmd, description=msg)
-
-
-def enable_repo(args, image):
- """Enable a repository in the target image.
-
- @param args: cmdline arguments, must contain --repo
- @param image: cloud_tests.image instance to operate on
- @return_value: None, may raise errors
- """
- # find enable repo command for the distro
- os_family = util.get_os_family(image.properties['os'])
- if os_family == 'debian':
- cmd = ('echo "{}" >> "/etc/apt/sources.list" '.format(args.repo) +
- '&& apt-get update')
-    elif os_family == 'redhat':
- cmd = 'yum-config-manager --add-repo="{}"'.format(args.repo)
- else:
- raise NotImplementedError('enable repo command not configured for '
- 'distro from family: {}'.format(os_family))
-
- msg = 'enable repo: "{}" in target'.format(args.repo)
- LOG.debug(msg)
- image.execute(cmd, description=msg)
-
-
-def setup_image(args, image):
- """Set up image as specified in args.
-
- @param args: cmdline arguments
- @param image: cloud_tests.image instance to operate on
- @return_value: tuple of results and fail count
- """
- # update the args if necessary for this image
- overrides = image.setup_overrides
- LOG.debug('updating args for setup with: %s', overrides)
- args = util.update_args(args, overrides, preserve_old=True)
-
- # mapping of setup cmdline arg name to setup function
-    # represented as a tuple rather than a dict or odict, since lookup by
-    # name is not needed and order matters: --script and --upgrade run last
- handlers = (
- # arg handler description
- ('deb', install_deb, 'setup func for --deb, install deb'),
- ('rpm', install_rpm, 'setup func for --rpm, install rpm'),
- ('repo', enable_repo, 'setup func for --repo, enable repo'),
- ('ppa', enable_ppa, 'setup func for --ppa, enable ppa'),
- ('script', run_script, 'setup func for --script, run script'),
- ('upgrade', upgrade, 'setup func for --upgrade, upgrade cloud-init'),
- ('upgrade-full', upgrade_full, 'setup func for --upgrade-full'),
- )
-
- # determine which setup functions needed
- calls = [partial(stage.run_single, desc, partial(func, args, image))
- for name, func, desc in handlers if getattr(args, name, None)]
-
- try:
- data = yaml.safe_load(
- image.read_data("/etc/cloud/build.info", decode=True))
- info = ' '.join(["%s=%s" % (k, data.get(k))
- for k in ("build_name", "serial") if k in data])
- except Exception as e:
- info = "N/A (%s)" % e
-
- LOG.info('setting up image %s (info %s)', image, info)
- res = stage.run_stage(
- 'set up for {}'.format(image), calls, continue_after_error=False)
- return res
-
-# vi: ts=4 expandtab
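
Note on the setup_image.py hunk above: it pairs each command-line flag with a handler in a plain tuple, then turns the active ones into functools.partial callables for the stage runner. A minimal, self-contained sketch of that dispatch idea (the demo handlers and fake_args are hypothetical, not cloud-init code):

    from functools import partial
    from types import SimpleNamespace

    def install_deb(args, image):
        print("would install %s into %s" % (args.deb, image))

    def run_script(args, image):
        print("would run %s in %s" % (args.script, image))

    # same shape as the handlers tuple above: (arg name, handler, description)
    handlers = (
        ("deb", install_deb, "install deb"),
        ("script", run_script, "run script"),
    )

    fake_args = SimpleNamespace(deb="cloud-init_all.deb", script=None)
    calls = [partial(func, fake_args, "image-1")
             for name, func, _desc in handlers
             if getattr(fake_args, name, None)]
    for call in calls:
        call()  # only the deb handler fires; script was not set
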
diff --git a/tests/cloud_tests/stage.py b/tests/cloud_tests/stage.py
deleted file mode 100644
index d64a1dcc..00000000
--- a/tests/cloud_tests/stage.py
+++ /dev/null
@@ -1,116 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Stage a run."""
-
-import sys
-import time
-import traceback
-
-from tests.cloud_tests import LOG
-
-
-class PlatformComponent(object):
- """Context manager to safely handle platform components."""
-
- def __init__(self, get_func, preserve_instance=False):
- """Store get_<platform component> function as partial with no args.
-
- @param get_func: Callable returning an instance from the platform.
- @param preserve_instance: Boolean, when True, do not destroy instance
- after test. Used for test development.
- """
- self.get_func = get_func
- self.preserve_instance = preserve_instance
-
- def __enter__(self):
- """Create instance of platform component."""
- self.instance = self.get_func()
- return self.instance
-
- def __exit__(self, etype, value, trace):
- """Destroy instance."""
- if self.instance is not None:
- if self.preserve_instance:
- LOG.info('Preserving test instance %s', self.instance.name)
- else:
- self.instance.destroy()
-
-
-def run_single(name, call):
- """Run a single function, keeping track of results and time.
-
- @param name: name of part
- @param call: call to make
- @return_value: a tuple of result and fail count
- """
- res = {
- 'name': name,
- 'time': 0,
- 'errors': [],
- 'success': False
- }
- failed = 0
- start_time = time.time()
-
- try:
- call()
- except Exception as e:
- failed += 1
- res['errors'].append(str(e))
- LOG.error('stage part: %s encountered error: %s', name, str(e))
- trace = traceback.extract_tb(sys.exc_info()[-1])
- LOG.error('traceback:\n%s', ''.join(traceback.format_list(trace)))
-
- res['time'] = time.time() - start_time
- if failed == 0:
- res['success'] = True
-
- return res, failed
-
-
-def run_stage(parent_name, calls, continue_after_error=True):
- """Run a stage of collection, keeping track of results and failures.
-
- @param parent_name: name of stage calls are under
-    @param calls: list of function calls taking no params; each must return a
-                  tuple of results and failures and may raise exceptions
- @param continue_after_error: whether or not to proceed to the next call
- after catching an exception or recording a
- failure
-    @return_value: a tuple of (results, failure count); the results dict
-                   holds per-call results under 'stages', any errors raised
-                   at this level, the elapsed time and the stage name
- """
- res = {
- 'name': parent_name,
- 'time': 0,
- 'errors': [],
- 'stages': [],
- 'success': False,
- }
- failed = 0
- start_time = time.time()
-
- for call in calls:
- try:
- (call_res, call_failed) = call()
- res['stages'].append(call_res)
- except Exception as e:
- call_failed = 1
- res['errors'].append(str(e))
- LOG.error('stage: %s encountered error: %s', parent_name, str(e))
- trace = traceback.extract_tb(sys.exc_info()[-1])
- LOG.error('traceback:\n%s', ''.join(traceback.format_list(trace)))
-
- failed += call_failed
- if call_failed and not continue_after_error:
- break
-
- res['time'] = time.time() - start_time
- if not failed:
- res['success'] = True
-
- return (res, failed)
-
-# vi: ts=4 expandtab
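
run_single() and run_stage() above wrap arbitrary callables and collect timing, error strings and a failure count, while PlatformComponent guarantees instances are destroyed unless preserved. A rough usage sketch, assuming the module were still importable at this path (the sample callables are made up):

    from tests.cloud_tests import stage

    def ok():
        pass

    def boom():
        raise RuntimeError("simulated failure")

    calls = [
        lambda: stage.run_single("works", ok),
        lambda: stage.run_single("breaks", boom),
    ]
    result, failed = stage.run_stage(
        "demo stage", calls, continue_after_error=True)
    # result["stages"] has one dict per call; failed counts the failing ones
    print(failed, [s["success"] for s in result["stages"]])
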
diff --git a/tests/cloud_tests/testcases.yaml b/tests/cloud_tests/testcases.yaml
deleted file mode 100644
index fb9a5d27..00000000
--- a/tests/cloud_tests/testcases.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-# ============================= Base Test Config ==============================
-base_test_data:
- script_timeout: 20
- enabled: True
- required_features: []
- cloud_config: |
- #cloud-config
- collect_scripts:
- cloud-init.log: |
- #!/bin/sh
- cat /var/log/cloud-init.log
- cloud-init-output.log: |
- #!/bin/sh
- cat /var/log/cloud-init-output.log
- instance-id: |
- #!/bin/sh
- cat /run/cloud-init/.instance-id
- instance-data.json: |
- #!/bin/sh
- cat /run/cloud-init/instance-data.json
- result.json: |
- #!/bin/sh
- cat /run/cloud-init/result.json
- status.json: |
- #!/bin/sh
- cat /run/cloud-init/status.json
- package-versions: |
- #!/bin/sh
- dpkg-query --show
- build.info: |
- #!/bin/sh
- binfo=/etc/cloud/build.info
- [ -f "$binfo" ] && cat "$binfo" || echo "N/A"
- system.journal.gz: |
- #!/bin/sh
- [ -d /run/systemd ] || { echo "not systemd."; exit 0; }
- fail() { echo "ERROR:" "$@" 1>&2; exit 1; }
- journal=""
- for d in /run/log/journal /var/log/journal; do
- for f in $d/*/system.journal; do
- [ -f "$f" ] || continue
- [ -z "$journal" ] ||
- fail "multiple journal found: $f $journal."
- journal="$f"
- done
- done
- [ -f "$journal" ] || fail "no journal file found."
- gzip --to-stdout "$journal"
-
-# vi: ts=4 expandtab
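
Each key under collect_scripts in the base config above is a shell snippet whose stdout is stored under that key and later handed to the verifier as a data file. A toy illustration of that contract (not the real collection code, which lived in collect.py):

    import subprocess

    collect_scripts = {
        "instance-id": "#!/bin/sh\ncat /run/cloud-init/.instance-id\n",
    }

    collected = {}
    for name, script in collect_scripts.items():
        proc = subprocess.run(["sh", "-c", script], capture_output=True)
        collected[name] = proc.stdout  # bytes, decoded by the verifier later

    print(collected["instance-id"].decode("utf-8", errors="replace").strip())
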
diff --git a/tests/cloud_tests/testcases/__init__.py b/tests/cloud_tests/testcases/__init__.py
deleted file mode 100644
index bb9785d3..00000000
--- a/tests/cloud_tests/testcases/__init__.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Main init."""
-
-import importlib
-import inspect
-import unittest
-
-from cloudinit.util import read_conf
-
-from tests.cloud_tests import config
-from tests.cloud_tests.testcases.base import CloudTestCase as base_test
-
-
-def discover_test(test_name):
- """Discover tests in test file for 'testname'.
-
- @return_value: list of test classes
- """
- testmod_name = 'tests.cloud_tests.testcases.{}'.format(
- config.name_sanitize(test_name))
- try:
- testmod = importlib.import_module(testmod_name)
-    except ImportError as e:
- raise ValueError(
- 'no test verifier found at: {}'.format(testmod_name)
- ) from e
-
- found = [mod for name, mod in inspect.getmembers(testmod)
- if (inspect.isclass(mod)
- and base_test in inspect.getmro(mod)
- and getattr(mod, '__test__', True))]
- if len(found) != 1:
- raise RuntimeError(
- "Unexpected situation, multiple tests for %s: %s" % (
- test_name, found))
-
- return found
-
-
-def get_test_class(test_name, test_data, test_conf):
- test_class = discover_test(test_name)[0]
-
- class DynamicTestSubclass(test_class):
-
- _realclass = test_class
- data = test_data
- conf = test_conf
- release_conf = read_conf(config.RELEASES_CONF)['releases']
-
- def __str__(self):
- return "%s (%s)" % (self._testMethodName,
- unittest.util.strclass(self._realclass))
-
- @classmethod
- def setUpClass(cls):
- cls.maybeSkipTest()
-
- return DynamicTestSubclass
-
-
-def get_suite(test_name, data, conf):
- """Get test suite with all tests for 'testname'.
-
- @return_value: a test suite
- """
- suite = unittest.TestSuite()
- suite.addTest(
- unittest.defaultTestLoader.loadTestsFromTestCase(
- get_test_class(test_name, data, conf)))
- return suite
-
-# vi: ts=4 expandtab
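
get_suite() above builds a dynamically subclassed unittest suite that carries the collected data and the test's cloud-config. Assuming collected data and a raw cloud-config string are already in hand, driving it looks roughly like this (the test name and data values are placeholders):

    import unittest
    from tests.cloud_tests import testcases

    data = {"os_name": "bionic", "platform": "lxd",
            "status.json": b"{}", "cloud-init.log": b""}
    conf = "#cloud-config\nfinal_message: done\n"

    suite = testcases.get_suite("modules.final_message", data, conf)
    unittest.TextTestRunner(verbosity=2).run(suite)
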
diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py
deleted file mode 100644
index 4448e0b5..00000000
--- a/tests/cloud_tests/testcases/base.py
+++ /dev/null
@@ -1,385 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Base test case module."""
-
-import crypt
-import json
-import re
-import unittest
-
-
-from cloudinit import util as c_util
-
-SkipTest = unittest.SkipTest
-
-
-class CloudTestCase(unittest.TestCase):
- """Base test class for verifiers."""
-
- # data gets populated in get_suite.setUpClass
- data = {}
- conf = None
- _cloud_config = None
- release_conf = {} # The platform's os release configuration
-
- expected_warnings = () # Subclasses set to ignore expected WARN logs
-
- @property
- def os_cfg(self):
- return self.release_conf[self.os_name]['default']
-
- def is_distro(self, distro_name):
- return self.os_cfg['os'] == distro_name
-
- @classmethod
- def maybeSkipTest(cls):
- """Present to allow subclasses to override and raise a skipTest."""
-
- def assertPackageInstalled(self, name, version=None):
- """Check dpkg-query --show output for matching package name.
-
- @param name: package base name
- @param version: string representing a package version or part of a
- version.
- """
- pkg_out = self.get_data_file('package-versions')
- pkg_match = re.search(
- '^%s\t(?P<version>.*)$' % name, pkg_out, re.MULTILINE)
- if pkg_match:
- installed_version = pkg_match.group('version')
- if not version:
- return # Success
- if installed_version.startswith(version):
- return # Success
- raise AssertionError(
- 'Expected package version %s-%s not found. Found %s' %
-            (name, version, installed_version))
- raise AssertionError('Package not installed: %s' % name)
-
- def os_version_cmp(self, cmp_version):
- """Compare the version of the test to comparison_version.
-
-        @param cmp_version: Either a float or a string representing
- a release os from releases.yaml (e.g. centos66)
-
- @return: -1 when version < cmp_version, 0 when version=cmp_version and
- 1 when version > cmp_version.
- """
- version = self.release_conf[self.os_name]['default']['version']
- if isinstance(cmp_version, str):
- cmp_version = self.release_conf[cmp_version]['default']['version']
- if version < cmp_version:
- return -1
- elif version == cmp_version:
- return 0
- else:
- return 1
-
- @property
- def os_name(self):
- return self.data.get('os_name', 'UNKNOWN')
-
- @property
- def platform(self):
- return self.data.get('platform', 'UNKNOWN')
-
- @property
- def cloud_config(self):
- """Get the cloud-config used by the test."""
- if not self._cloud_config:
- self._cloud_config = c_util.load_yaml(self.conf)
- return self._cloud_config
-
- def get_config_entry(self, name):
- """Get a config entry from cloud-config ensuring that it is present."""
- if name not in self.cloud_config:
- raise AssertionError('Key "{}" not in cloud config'.format(name))
- return self.cloud_config[name]
-
- def get_data_file(self, name, decode=True):
- """Get data file failing test if it is not present."""
- if name not in self.data:
- raise AssertionError('File "{}" missing from collect data'
- .format(name))
- if not decode:
- return self.data[name]
- return self.data[name].decode('utf-8')
-
- def get_instance_id(self):
- """Get recorded instance id."""
- return self.get_data_file('instance-id').strip()
-
- def get_status_data(self, data, version=None):
- """Parse result.json and status.json like data files.
-
- @param data: data to load
- @param version: cloud-init output version, defaults to 'v1'
- @return_value: dict of data or None if missing
- """
- if not version:
- version = 'v1'
- data = json.loads(data)
- return data.get(version)
-
- def get_datasource(self):
- """Get datasource name."""
- data = self.get_status_data(self.get_data_file('result.json'))
- return data.get('datasource')
-
- def test_no_stages_errors(self):
- """Ensure that there were no errors in any stage."""
- status = self.get_status_data(self.get_data_file('status.json'))
- for stage in ('init', 'init-local', 'modules-config', 'modules-final'):
- self.assertIn(stage, status)
- self.assertEqual(len(status[stage]['errors']), 0,
- 'errors {} were encountered in stage {}'
- .format(status[stage]['errors'], stage))
- result = self.get_status_data(self.get_data_file('result.json'))
- self.assertEqual(len(result['errors']), 0)
-
- def test_no_warnings_in_log(self):
- """Unexpected warnings should not be found in the log."""
- warnings = [
- line for line in self.get_data_file('cloud-init.log').splitlines()
- if 'WARN' in line]
- joined_warnings = '\n'.join(warnings)
- for expected_warning in self.expected_warnings:
- self.assertIn(
- expected_warning, joined_warnings,
- msg="Did not find %s in cloud-init.log" % expected_warning)
- # Prune expected from discovered warnings
- warnings = [w for w in warnings if expected_warning not in w]
- self.assertEqual(
- [], warnings, msg="'WARN' found inside cloud-init.log")
-
- def test_instance_data_json_ec2(self):
- """Validate instance-data.json content by ec2 platform.
-
- This content is sourced by snapd when determining snapstore endpoints.
- We validate expected values per cloud type to ensure we don't break
- snapd.
- """
- if self.platform != 'ec2':
- raise SkipTest(
- 'Skipping ec2 instance-data.json on %s' % self.platform)
- out = self.get_data_file('instance-data.json')
- if not out:
- if self.is_distro('ubuntu') and self.os_version_cmp('bionic') >= 0:
- raise AssertionError(
- 'No instance-data.json found on %s' % self.os_name)
- raise SkipTest(
- 'Skipping instance-data.json test.'
- ' OS: %s not bionic or newer' % self.os_name)
- instance_data = json.loads(out)
- self.assertCountEqual(['merged_cfg'], instance_data['sensitive_keys'])
- ds = instance_data.get('ds', {})
- v1_data = instance_data.get('v1', {})
- metadata = ds.get('meta-data', {})
- macs = metadata.get(
- 'network', {}).get('interfaces', {}).get('macs', {})
- if not macs:
- raise AssertionError('No network data from EC2 meta-data')
- # Check meta-data items we depend on
- expected_net_keys = [
- 'public-ipv4s', 'ipv4-associations', 'local-hostname',
- 'public-hostname']
- for mac_data in macs.values():
- for key in expected_net_keys:
- self.assertIn(key, mac_data)
- self.assertIsNotNone(
- metadata.get('placement', {}).get('availability-zone'),
- 'Could not determine EC2 Availability zone placement')
- self.assertIsNotNone(
- v1_data['availability_zone'], 'expected ec2 availability_zone')
- self.assertEqual('aws', v1_data['cloud_name'])
- self.assertEqual('ec2', v1_data['platform'])
- self.assertEqual(
- 'metadata (http://169.254.169.254)', v1_data['subplatform'])
- self.assertIn('i-', v1_data['instance_id'])
- self.assertIn('ip-', v1_data['local_hostname'])
- self.assertIsNotNone(v1_data['region'], 'expected ec2 region')
- self.assertIsNotNone(
- re.match(r'\d\.\d+\.\d+-\d+-aws', v1_data['kernel_release']))
- self.assertEqual(
- 'redacted for non-root user', instance_data['merged_cfg'])
- self.assertEqual(self.os_cfg['os'], v1_data['variant'])
- self.assertEqual(self.os_cfg['os'], v1_data['distro'])
- self.assertEqual(
- self.os_cfg['os'], instance_data["sys_info"]['dist'][0],
- "Unexpected sys_info dist value")
- self.assertEqual(self.os_name, v1_data['distro_release'])
- self.assertEqual(
- str(self.os_cfg['version']), v1_data['distro_version'])
- self.assertEqual('x86_64', v1_data['machine'])
- self.assertIsNotNone(
-            re.match(r'3\.\d+\.\d+', v1_data['python_version']),
- "unexpected python version: {ver}".format(
- ver=v1_data["python_version"]))
-
- def test_instance_data_json_lxd(self):
- """Validate instance-data.json content by lxd platform.
-
- This content is sourced by snapd when determining snapstore endpoints.
- We validate expected values per cloud type to ensure we don't break
- snapd.
- """
- if self.platform != 'lxd':
- raise SkipTest(
- 'Skipping lxd instance-data.json on %s' % self.platform)
- out = self.get_data_file('instance-data.json')
- if not out:
- if self.is_distro('ubuntu') and self.os_version_cmp('bionic') >= 0:
- raise AssertionError(
- 'No instance-data.json found on %s' % self.os_name)
- raise SkipTest(
- 'Skipping instance-data.json test.'
- ' OS: %s not bionic or newer' % self.os_name)
- instance_data = json.loads(out)
- v1_data = instance_data.get('v1', {})
- self.assertCountEqual([], sorted(instance_data['base64_encoded_keys']))
- self.assertEqual('unknown', v1_data['cloud_name'])
- self.assertEqual('lxd', v1_data['platform'])
- self.assertEqual(
- 'seed-dir (/var/lib/cloud/seed/nocloud-net)',
- v1_data['subplatform'])
- self.assertIsNone(
- v1_data['availability_zone'],
- 'found unexpected lxd availability_zone %s' %
- v1_data['availability_zone'])
- self.assertIn('cloud-test', v1_data['instance_id'])
- self.assertIn('cloud-test', v1_data['local_hostname'])
- self.assertIsNone(
- v1_data['region'],
- 'found unexpected lxd region %s' % v1_data['region'])
- self.assertIsNotNone(
- re.match(r'\d\.\d+\.\d+-\d+', v1_data['kernel_release']))
- self.assertEqual(
- 'redacted for non-root user', instance_data['merged_cfg'])
- self.assertEqual(self.os_cfg['os'], v1_data['variant'])
- self.assertEqual(self.os_cfg['os'], v1_data['distro'])
- self.assertEqual(
- self.os_cfg['os'], instance_data["sys_info"]['dist'][0],
- "Unexpected sys_info dist value")
- self.assertEqual(self.os_name, v1_data['distro_release'])
- self.assertEqual(
- str(self.os_cfg['version']), v1_data['distro_version'])
- self.assertEqual('x86_64', v1_data['machine'])
- self.assertIsNotNone(
-            re.match(r'3\.\d+\.\d+', v1_data['python_version']),
- "unexpected python version: {ver}".format(
- ver=v1_data["python_version"]))
-
- def test_instance_data_json_kvm(self):
- """Validate instance-data.json content by nocloud-kvm platform.
-
- This content is sourced by snapd when determining snapstore endpoints.
- We validate expected values per cloud type to ensure we don't break
- snapd.
- """
- if self.platform != 'nocloud-kvm':
- raise SkipTest(
- 'Skipping nocloud-kvm instance-data.json on %s' %
- self.platform)
- out = self.get_data_file('instance-data.json')
- if not out:
- if self.is_distro('ubuntu') and self.os_version_cmp('bionic') >= 0:
- raise AssertionError(
- 'No instance-data.json found on %s' % self.os_name)
- raise SkipTest(
- 'Skipping instance-data.json test.'
- ' OS: %s not bionic or newer' % self.os_name)
- instance_data = json.loads(out)
- v1_data = instance_data.get('v1', {})
- self.assertCountEqual([], instance_data['base64_encoded_keys'])
- self.assertEqual('unknown', v1_data['cloud_name'])
- self.assertEqual('nocloud', v1_data['platform'])
- subplatform = v1_data['subplatform']
- self.assertIsNotNone(
- re.match(r'config-disk \(\/dev\/[a-z]{3}\)', subplatform),
- 'kvm subplatform "%s" != "config-disk (/dev/...)"' % subplatform)
- self.assertIsNone(
- v1_data['availability_zone'],
- 'found unexpected kvm availability_zone %s' %
- v1_data['availability_zone'])
- self.assertIsNotNone(
- re.match(r'[\da-f]{8}(-[\da-f]{4}){3}-[\da-f]{12}',
- v1_data['instance_id']),
- 'kvm instance_id is not a UUID: %s' % v1_data['instance_id'])
- self.assertIn('ubuntu', v1_data['local_hostname'])
- self.assertIsNone(
- v1_data['region'],
-            'found unexpected kvm region %s' % v1_data['region'])
- self.assertIsNotNone(
- re.match(r'\d\.\d+\.\d+-\d+', v1_data['kernel_release']))
- self.assertEqual(
- 'redacted for non-root user', instance_data['merged_cfg'])
- self.assertEqual(self.os_cfg['os'], v1_data['variant'])
- self.assertEqual(self.os_cfg['os'], v1_data['distro'])
- self.assertEqual(
- self.os_cfg['os'], instance_data["sys_info"]['dist'][0],
- "Unexpected sys_info dist value")
- self.assertEqual(self.os_name, v1_data['distro_release'])
- self.assertEqual(
- str(self.os_cfg['version']), v1_data['distro_version'])
- self.assertEqual('x86_64', v1_data['machine'])
- self.assertIsNotNone(
-            re.match(r'3\.\d+\.\d+', v1_data['python_version']),
- "unexpected python version: {ver}".format(
- ver=v1_data["python_version"]))
-
-
-class PasswordListTest(CloudTestCase):
- """Base password test case class."""
-
- def test_shadow_passwords(self):
- """Test shadow passwords."""
- shadow = self.get_data_file('shadow')
- users = {}
- dupes = []
- for line in shadow.splitlines():
- user, encpw = line.split(":")[0:2]
- if user in users:
- dupes.append(user)
- users[user] = encpw
-
- jane_enc = "$5$iW$XsxmWCdpwIW8Yhv.Jn/R3uk6A4UaicfW5Xp7C9p9pg."
- self.assertEqual([], dupes)
- self.assertEqual(jane_enc, users['jane'])
-
- mikey_enc = "$5$xZ$B2YGGEx2AOf4PeW48KC6.QyT1W2B4rZ9Qbltudtha89"
- self.assertEqual(mikey_enc, users['mikey'])
-
- # shadow entry is $N$salt$, so we encrypt with the same format
- # and salt and expect the result.
- tom = "mypassword123!"
- fmtsalt = users['tom'][0:users['tom'].rfind("$") + 1]
- tom_enc = crypt.crypt(tom, fmtsalt)
- self.assertEqual(tom_enc, users['tom'])
-
- harry_enc = ("$6$LF$9Z2p6rWK6TNC1DC6393ec0As.18KRAvKDbfsG"
- "JEdWN3sRQRwpdfoh37EQ3yUh69tP4GSrGW5XKHxMLiKowJgm/")
- dick_enc = "$1$ssisyfpf$YqvuJLfrrW6Cg/l53Pi1n1"
-
- # these should have been changed to random values.
- self.assertNotEqual(harry_enc, users['harry'])
- self.assertTrue(users['harry'].startswith("$"))
- self.assertNotEqual(dick_enc, users['dick'])
- self.assertTrue(users['dick'].startswith("$"))
-
- self.assertNotEqual(users['harry'], users['dick'])
-
- def test_shadow_expected_users(self):
- """Test every tom, dick, and harry user in shadow."""
- out = self.get_data_file('shadow')
- self.assertIn('tom:', out)
- self.assertIn('dick:', out)
- self.assertIn('harry:', out)
- self.assertIn('jane:', out)
- self.assertIn('mikey:', out)
-
- def test_sshd_config(self):
- """Test sshd config allows passwords."""
- out = self.get_data_file('sshd_config')
- self.assertIn('PasswordAuthentication yes', out)
-
-# vi: ts=4 expandtab
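
PasswordListTest above checks hashed passwords by re-encrypting the known plaintext with the "$N$salt$" prefix recovered from the shadow field. The core trick in isolation (Unix-only; the crypt module is deprecated in recent Python releases):

    import crypt

    # build a reference SHA-256-crypt hash, then treat it as a shadow field
    stored = crypt.crypt("mypassword123!", "$5$iW$")
    # keep everything up to and including the last '$' -> "$5$iW$"
    fmtsalt = stored[0:stored.rfind("$") + 1]
    assert crypt.crypt("mypassword123!", fmtsalt) == stored
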
diff --git a/tests/cloud_tests/testcases/bugs/README.md b/tests/cloud_tests/testcases/bugs/README.md
deleted file mode 100644
index 09ce0765..00000000
--- a/tests/cloud_tests/testcases/bugs/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
-# Bug Test Configs
-
-## purpose
-Configs that reproduce bugs filed against cloud-init. Having test configs for
-cloud-init bugs ensures that the fixes do not break in the future, and makes it
-easy to see how many systems and platforms are affected by a new bug.
-
-## structure
-Should have one test config for most bugs filed. The name of the test should
-contain ``lp`` followed by the bug number. It may also be useful to add a
-comment to each bug config with a summary copied from the bug report.
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/bugs/__init__.py b/tests/cloud_tests/testcases/bugs/__init__.py
deleted file mode 100644
index c6452f9c..00000000
--- a/tests/cloud_tests/testcases/bugs/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Test verifiers for cloud-init bugs.
-
-See configs/bugs/README.md for more information
-"""
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/bugs/lp1511485.py b/tests/cloud_tests/testcases/bugs/lp1511485.py
deleted file mode 100644
index 670d3aff..00000000
--- a/tests/cloud_tests/testcases/bugs/lp1511485.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestLP1511485(base.CloudTestCase):
- """Test LP# 1511485."""
-
- def test_final_message(self):
- """Test final message exists."""
- out = self.get_data_file('cloud-init-output.log')
- self.assertIn('Final message from cloud-config', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/bugs/lp1511485.yaml b/tests/cloud_tests/testcases/bugs/lp1511485.yaml
deleted file mode 100644
index ebf9763f..00000000
--- a/tests/cloud_tests/testcases/bugs/lp1511485.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-#
-# LP Bug 1511485: final_message is silent on ubuntu-12.04.5 / cloud-init 0.6.3
-#
-# 2016-11-17: Disabled as covered by module based tests
-#
-enabled: False
-cloud_config: |
- #cloud-config
- final_message: "Final message from cloud-config"
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/bugs/lp1611074.yaml b/tests/cloud_tests/testcases/bugs/lp1611074.yaml
deleted file mode 100644
index 960679d5..00000000
--- a/tests/cloud_tests/testcases/bugs/lp1611074.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-#
-# LP Bug 1611074: Reformatting of ephemeral drive fails on resize of Azure VM
-#
-# 2016-11-18: Disabled until test written
-#
-enabled: False
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/bugs/lp1628337.py b/tests/cloud_tests/testcases/bugs/lp1628337.py
deleted file mode 100644
index a2c90481..00000000
--- a/tests/cloud_tests/testcases/bugs/lp1628337.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestLP1628337(base.CloudTestCase):
- """Test LP# 1511485."""
-
- def test_fetch_indices(self):
- """Verify no apt errors."""
- out = self.get_data_file('cloud-init-output.log')
- self.assertNotIn('W: Failed to fetch', out)
- self.assertNotIn('W: Some index files failed to download. '
- 'They have been ignored, or old ones used instead.',
- out)
-
- def test_ntp(self):
- """Verify can find ntp and install it."""
- out = self.get_data_file('cloud-init-output.log')
- self.assertNotIn('E: Unable to locate package ntp', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/bugs/lp1628337.yaml b/tests/cloud_tests/testcases/bugs/lp1628337.yaml
deleted file mode 100644
index e39b3cd8..00000000
--- a/tests/cloud_tests/testcases/bugs/lp1628337.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-# LP Bug 1628337: cloud-init tries to install NTP before even configuring the archives
-#
-required_features:
- - apt
- - lsb_release
-cloud_config: |
- #cloud-config
- ntp:
- servers: ['ntp.ubuntu.com']
- apt:
- primary:
- - arches: [default]
- uri: http://us.archive.ubuntu.com/ubuntu/
-collect_scripts:
- ntp.conf: |
- #!/bin/bash
- cat /etc/ntp.conf
- sources.list: |
- #!/bin/bash
- cat /etc/apt/sources.list
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/README.md b/tests/cloud_tests/testcases/examples/README.md
deleted file mode 100644
index 110a223b..00000000
--- a/tests/cloud_tests/testcases/examples/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
-# Example Test Configs
-
-## Purpose
-This folder contains example cloud configs found on
-[cloudinit.readthedocs.io](https://cloudinit.readthedocs.io/en/latest/topics/examples.html).
-Examples covered by other tests, like modules, are excluded from tests here
-to prevent duplication and reduce test time.
-
-## Structure
-One test per example test config on cloudinit.readthedocs.io
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/TODO.md b/tests/cloud_tests/testcases/examples/TODO.md
deleted file mode 100644
index 8db0e98e..00000000
--- a/tests/cloud_tests/testcases/examples/TODO.md
+++ /dev/null
@@ -1,15 +0,0 @@
-# Missing Examples
-
-Below is a list of the missing examples and why each is not currently added.
-
- - Chef (takes > 60 seconds to run)
- - Puppet (takes > 60 seconds to run)
- - Manage resolv.conf (lxd backend overrides changes)
- - Adding a yum repository (need centos system)
- - Register RedHat Subscription (need centos system + subscription)
- - Adjust mount points mounted (need multiple disks)
- - Call a url when finished (need end point)
- - Reboot/poweroff when finished (how to test)
- - Disk setup (need multiple disks)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/__init__.py b/tests/cloud_tests/testcases/examples/__init__.py
deleted file mode 100644
index 39af88c2..00000000
--- a/tests/cloud_tests/testcases/examples/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Test verifiers for cloud-init examples.
-
-See configs/examples/README.md for more information
-"""
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/add_apt_repositories.py b/tests/cloud_tests/testcases/examples/add_apt_repositories.py
deleted file mode 100644
index 71eede97..00000000
--- a/tests/cloud_tests/testcases/examples/add_apt_repositories.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestAptconfigurePrimary(base.CloudTestCase):
- """Example cloud-config test."""
-
- def test_ubuntu_sources(self):
- """Test no default Ubuntu entries exist."""
- out = self.get_data_file('ubuntu.sources.list')
- self.assertEqual(0, int(out))
-
- def test_gatech_sources(self):
- """Test GaTech entires exist."""
- out = self.get_data_file('gatech.sources.list')
- self.assertEqual(20, int(out))
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/add_apt_repositories.yaml b/tests/cloud_tests/testcases/examples/add_apt_repositories.yaml
deleted file mode 100644
index 4b8575f7..00000000
--- a/tests/cloud_tests/testcases/examples/add_apt_repositories.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-# From cloud config examples on cloudinit.readthedocs.io
-#
-# 2016-11-17: Disabled as covered by module based tests
-#
-enabled: False
-required_features:
- - apt
-cloud_config: |
- #cloud-config
- apt:
- primary:
- - arches: [default]
- uri: "http://www.gtlib.gatech.edu/pub/ubuntu-releases/"
-collect_scripts:
- ubuntu.sources.list: |
- #!/bin/bash
- cat /etc/apt/sources.list | grep -v '^#' | sed '/^\s*$/d' | grep archive.ubuntu.com | wc -l
- gatech.sources.list: |
- #!/bin/bash
- cat /etc/apt/sources.list | grep -v '^#' | sed '/^\s*$/d' | grep gtlib.gatech.edu | wc -l
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/alter_completion_message.py b/tests/cloud_tests/testcases/examples/alter_completion_message.py
deleted file mode 100644
index b7b5d5e0..00000000
--- a/tests/cloud_tests/testcases/examples/alter_completion_message.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestFinalMessage(base.CloudTestCase):
- """Test cloud init module `cc_final_message`."""
-
- subs_char = '$'
-
- def get_final_message_config(self):
- """Get config for final message."""
- self.assertIn('final_message', self.cloud_config)
- return self.cloud_config['final_message']
-
- def get_final_message(self):
- """Get final message from log."""
- out = self.get_data_file('cloud-init-output.log')
- lines = len(self.get_final_message_config().splitlines())
- return '\n'.join(out.splitlines()[-1 * lines:])
-
- def test_final_message_string(self):
- """Ensure final handles regular strings."""
- for actual, config in zip(
- self.get_final_message().splitlines(),
- self.get_final_message_config().splitlines()):
- if self.subs_char not in config:
- self.assertEqual(actual, config)
-
- def test_final_message_subs(self):
- """Test variable substitution in final message."""
- # TODO: add verification of other substitutions
- patterns = {'$datasource': self.get_datasource()}
- for key, expected in patterns.items():
- index = self.get_final_message_config().splitlines().index(key)
- actual = self.get_final_message().splitlines()[index]
- self.assertEqual(actual, expected)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/alter_completion_message.yaml b/tests/cloud_tests/testcases/examples/alter_completion_message.yaml
deleted file mode 100644
index 9e154f80..00000000
--- a/tests/cloud_tests/testcases/examples/alter_completion_message.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-#
-# From cloud config examples on cloudinit.readthedocs.io
-#
-# 2016-11-17: Disabled as covered by module based tests
-#
-enabled: False
-cloud_config: |
- #cloud-config
- final_message: |
- This is my final message!
- $version
- $timestamp
- $datasource
- $uptime
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/configure_instance_trusted_ca_certificates.py b/tests/cloud_tests/testcases/examples/configure_instance_trusted_ca_certificates.py
deleted file mode 100644
index 38540eb8..00000000
--- a/tests/cloud_tests/testcases/examples/configure_instance_trusted_ca_certificates.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestTrustedCA(base.CloudTestCase):
- """Example cloud-config test."""
-
- def test_cert_count_ca(self):
- """Test correct count of CAs in .crt."""
- out = self.get_data_file('cert_count_ca')
- self.assertIn('7 /etc/ssl/certs/ca-certificates.crt', out)
-
- def test_cert_count_cloudinit(self):
- """Test correct count of CAs in .pem."""
- out = self.get_data_file('cert_count_cloudinit')
- self.assertIn('7 /etc/ssl/certs/cloud-init-ca-certs.pem', out)
-
- def test_cloudinit_certs(self):
- """Test text of cert."""
- out = self.get_data_file('cloudinit_certs')
- self.assertIn('-----BEGIN CERTIFICATE-----', out)
- self.assertIn('YOUR-ORGS-TRUSTED-CA-CERT-HERE', out)
- self.assertIn('-----END CERTIFICATE-----', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/configure_instance_trusted_ca_certificates.yaml b/tests/cloud_tests/testcases/examples/configure_instance_trusted_ca_certificates.yaml
deleted file mode 100644
index ad32b088..00000000
--- a/tests/cloud_tests/testcases/examples/configure_instance_trusted_ca_certificates.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
-#
-# From cloud config examples on cloudinit.readthedocs.io
-#
-# 2016-11-17: Disabled as covered by module based tests
-#
-enabled: False
-cloud_config: |
- #cloud-config
- ca-certs:
- # If present and set to True, the 'remove-defaults' parameter will remove
- # all the default trusted CA certificates that are normally shipped with
- # Ubuntu.
- # This is mainly for paranoid admins - most users will not need this
- # functionality.
- remove-defaults: true
-
- # If present, the 'trusted' parameter should contain a certificate (or list
- # of certificates) to add to the system as trusted CA certificates.
- # Pay close attention to the YAML multiline list syntax. The example shown
- # here is for a list of multiline certificates.
- trusted:
- - |
- -----BEGIN CERTIFICATE-----
- YOUR-ORGS-TRUSTED-CA-CERT-HERE
- -----END CERTIFICATE-----
- - |
- -----BEGIN CERTIFICATE-----
- YOUR-ORGS-TRUSTED-CA-CERT-HERE
- -----END CERTIFICATE-----
-collect_scripts:
- cloudinit_certs: |
- #!/bin/bash
- cat /etc/ssl/certs/cloud-init-ca-certs.pem
- cert_count_ca: |
- #!/bin/bash
- wc -l /etc/ssl/certs/ca-certificates.crt
- cert_count_cloudinit: |
- #!/bin/bash
- wc -l /etc/ssl/certs/cloud-init-ca-certs.pem
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/configure_instances_ssh_keys.py b/tests/cloud_tests/testcases/examples/configure_instances_ssh_keys.py
deleted file mode 100644
index 691a316b..00000000
--- a/tests/cloud_tests/testcases/examples/configure_instances_ssh_keys.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestSSHKeys(base.CloudTestCase):
- """Example cloud-config test."""
-
- def test_cert_count(self):
- """Test cert count."""
- out = self.get_data_file('cert_count')
- self.assertEqual(20, int(out))
-
- def test_dsa_public(self):
- """Test DSA key has ending."""
- out = self.get_data_file('dsa_public')
- self.assertIn('ZN4XnifuO5krqAybngIy66PMEoQ= smoser@localhost', out)
-
- def test_rsa_public(self):
- """Test RSA key has specific ending."""
- out = self.get_data_file('rsa_public')
- self.assertIn('PemAWthxHO18QJvWPocKJtlsDNi3 smoser@localhost', out)
-
- def test_auth_keys(self):
- """Test authorized keys has specific ending."""
- out = self.get_data_file('auth_keys')
- self.assertIn('QPOt5Q8zWd9qG7PBl9+eiH5qV7NZ mykey@host', out)
- self.assertIn('Hj29SCmXp5Kt5/82cD/VN3NtHw== smoser@brickies', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/configure_instances_ssh_keys.yaml b/tests/cloud_tests/testcases/examples/configure_instances_ssh_keys.yaml
deleted file mode 100644
index f3eaf3ce..00000000
--- a/tests/cloud_tests/testcases/examples/configure_instances_ssh_keys.yaml
+++ /dev/null
@@ -1,63 +0,0 @@
-#
-# From cloud config examples on cloudinit.readthedocs.io
-#
-# 2016-11-17: Disabled as covered by module based tests
-#
-enabled: False
-cloud_config: |
- #cloud-config
- ssh_authorized_keys:
- - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEA3FSyQwBI6Z+nCSjUUk8EEAnnkhXlukKoUPND/RRClWz2s5TCzIkd3Ou5+Cyz71X0XmazM3l5WgeErvtIwQMyT1KjNoMhoJMrJnWqQPOt5Q8zWd9qG7PBl9+eiH5qV7NZ mykey@host
- - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZdQueUq5ozemNSj8T7enqKHOEaFoU2VoPgGEWC9RyzSQVeyD6s7APMcE82EtmW4skVEgEGSbDc1pvxzxtchBj78hJP6Cf5TCMFSXw+Fz5rF1dR23QDbN1mkHs7adr8GW4kSWqU7Q7NDwfIrJJtO7Hi42GyXtvEONHbiRPOe8stqUly7MvUoN+5kfjBM8Qqpfl2+FNhTYWpMfYdPUnE7u536WqzFmsaqJctz3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07/+i1D+ey3ONkZLN+LQ714cgj8fRS4Hj29SCmXp5Kt5/82cD/VN3NtHw== smoser@brickies
-
- # Send pre-generated ssh private keys to the server
- # If these are present, they will be written to /etc/ssh and
- # new random keys will not be generated
- # in addition to 'rsa' and 'dsa' as shown below, 'ecdsa' is also supported
- ssh_keys:
- rsa_private: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIBxwIBAAJhAKD0YSHy73nUgysO13XsJmd4fHiFyQ+00R7VVu2iV9Qcon2LZS/x
- 1cydPZ4pQpfjEha6WxZ6o8ci/Ea/w0n+0HGPwaxlEG2Z9inNtj3pgFrYcRztfECb
- 1j6HCibZbAzYtwIBIwJgO8h72WjcmvcpZ8OvHSvTwAguO2TkR6mPgHsgSaKy6GJo
- PUJnaZRWuba/HX0KGyhz19nPzLpzG5f0fYahlMJAyc13FV7K6kMBPXTRR6FxgHEg
- L0MPC7cdqAwOVNcPY6A7AjEA1bNaIjOzFN2sfZX0j7OMhQuc4zP7r80zaGc5oy6W
- p58hRAncFKEvnEq2CeL3vtuZAjEAwNBHpbNsBYTRPCHM7rZuG/iBtwp8Rxhc9I5w
- ixvzMgi+HpGLWzUIBS+P/XhekIjPAjA285rVmEP+DR255Ls65QbgYhJmTzIXQ2T9
- luLvcmFBC6l35Uc4gTgg4ALsmXLn71MCMGMpSWspEvuGInayTCL+vEjmNBT+FAdO
- W7D4zCpI43jRS9U06JVOeSc9CDk2lwiA3wIwCTB/6uc8Cq85D9YqpM10FuHjKpnP
- REPPOyrAspdeOAV+6VKRavstea7+2DZmSUgE
- -----END RSA PRIVATE KEY-----
-
- rsa_public: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEAoPRhIfLvedSDKw7XdewmZ3h8eIXJD7TRHtVW7aJX1ByifYtlL/HVzJ09nilCl+MSFrpbFnqjxyL8Rr/DSf7QcY/BrGUQbZn2Kc22PemAWthxHO18QJvWPocKJtlsDNi3 smoser@localhost
-
- dsa_private: |
- -----BEGIN DSA PRIVATE KEY-----
- MIIBuwIBAAKBgQDP2HLu7pTExL89USyM0264RCyWX/CMLmukxX0Jdbm29ax8FBJT
- pLrO8TIXVY5rPAJm1dTHnpuyJhOvU9G7M8tPUABtzSJh4GVSHlwaCfycwcpLv9TX
- DgWIpSj+6EiHCyaRlB1/CBp9RiaB+10QcFbm+lapuET+/Au6vSDp9IRtlQIVAIMR
- 8KucvUYbOEI+yv+5LW9u3z/BAoGBAI0q6JP+JvJmwZFaeCMMVxXUbqiSko/P1lsa
- LNNBHZ5/8MOUIm8rB2FC6ziidfueJpqTMqeQmSAlEBCwnwreUnGfRrKoJpyPNENY
- d15MG6N5J+z81sEcHFeprryZ+D3Ge9VjPq3Tf3NhKKwCDQ0240aPezbnjPeFm4mH
- bYxxcZ9GAoGAXmLIFSQgiAPu459rCKxT46tHJtM0QfnNiEnQLbFluefZ/yiI4DI3
- 8UzTCOXLhUA7ybmZha+D/csj15Y9/BNFuO7unzVhikCQV9DTeXX46pG4s1o23JKC
- /QaYWNMZ7kTRv+wWow9MhGiVdML4ZN4XnifuO5krqAybngIy66PMEoQCFEIsKKWv
- 99iziAH0KBMVbxy03Trz
- -----END DSA PRIVATE KEY-----
-
- dsa_public: ssh-dsa AAAAB3NzaC1kc3MAAACBAM/Ycu7ulMTEvz1RLIzTbrhELJZf8Iwua6TFfQl1ubb1rHwUElOkus7xMhdVjms8AmbV1Meem7ImE69T0bszy09QAG3NImHgZVIeXBoJ/JzByku/1NcOBYilKP7oSIcLJpGUHX8IGn1GJoH7XRBwVub6Vqm4RP78C7q9IOn0hG2VAAAAFQCDEfCrnL1GGzhCPsr/uS1vbt8/wQAAAIEAjSrok/4m8mbBkVp4IwxXFdRuqJKSj8/WWxos00Ednn/ww5QibysHYULrOKJ1+54mmpMyp5CZICUQELCfCt5ScZ9GsqgmnI80Q1h3Xkwbo3kn7PzWwRwcV6muvJn4PcZ71WM+rdN/c2EorAINDTbjRo97NueM94WbiYdtjHFxn0YAAACAXmLIFSQgiAPu459rCKxT46tHJtM0QfnNiEnQLbFluefZ/yiI4DI38UzTCOXLhUA7ybmZha+D/csj15Y9/BNFuO7unzVhikCQV9DTeXX46pG4s1o23JKC/QaYWNMZ7kTRv+wWow9MhGiVdML4ZN4XnifuO5krqAybngIy66PMEoQ= smoser@localhost
-collect_scripts:
- cert_count: |
- #!/bin/bash
- ls | wc -l
- dsa_public: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_dsa_key.pub
- rsa_public: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_rsa_key.pub
- auth_keys: |
- #!/bin/bash
- cat /home/ubuntu/.ssh/authorized_keys
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/including_user_groups.py b/tests/cloud_tests/testcases/examples/including_user_groups.py
deleted file mode 100644
index 4067348d..00000000
--- a/tests/cloud_tests/testcases/examples/including_user_groups.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestUserGroups(base.CloudTestCase):
- """Example cloud-config test."""
-
- def test_group_ubuntu(self):
- """Test ubuntu group exists."""
- out = self.get_data_file('group_ubuntu')
- self.assertRegex(out, r'ubuntu:x:[0-9]{4}:')
-
- def test_group_cloud_users(self):
- """Test cloud users group exists."""
- out = self.get_data_file('group_cloud_users')
- self.assertRegex(out, r'cloud-users:x:[0-9]{4}:barfoo')
-
- def test_user_ubuntu(self):
- """Test ubuntu user exists."""
- out = self.get_data_file('user_ubuntu')
- self.assertRegex(
- out, r'ubuntu:x:[0-9]{4}:[0-9]{4}:Ubuntu:/home/ubuntu:/bin/bash')
-
- def test_user_foobar(self):
- """Test foobar user exists."""
- out = self.get_data_file('user_foobar')
- self.assertRegex(
- out, r'foobar:x:[0-9]{4}:[0-9]{4}:Foo B. Bar:/home/foobar:')
-
- def test_user_barfoo(self):
- """Test barfoo user exists."""
- out = self.get_data_file('user_barfoo')
- self.assertRegex(
- out, r'barfoo:x:[0-9]{4}:[0-9]{4}:Bar B. Foo:/home/barfoo:')
-
- def test_user_cloudy(self):
- """Test cloudy user exists."""
- out = self.get_data_file('user_cloudy')
- self.assertRegex(out, r'cloudy:x:[0-9]{3,4}:')
-
- def test_user_root_in_secret(self):
- """Test root user is in 'secret' group."""
- _user, _, groups = self.get_data_file('root_groups').partition(":")
- self.assertIn("secret", groups.split(),
- msg="User root is not in group 'secret'")
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/including_user_groups.yaml b/tests/cloud_tests/testcases/examples/including_user_groups.yaml
deleted file mode 100644
index 86e392dd..00000000
--- a/tests/cloud_tests/testcases/examples/including_user_groups.yaml
+++ /dev/null
@@ -1,56 +0,0 @@
-#
-# From cloud config examples on cloudinit.readthedocs.io
-#
-# 2016-11-17: Disabled as covered by module based tests
-#
-enabled: False
-cloud_config: |
- #cloud-config
- # Add groups to the system
- groups:
- - secret: [root]
- - cloud-users
-
- # Add users to the system. Users are added after groups are added.
- users:
- - default
- - name: foobar
- gecos: Foo B. Bar
- primary_group: foobar
- groups: users
- expiredate: '2038-01-19'
- lock_passwd: false
- passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/
- - name: barfoo
- gecos: Bar B. Foo
- sudo: ALL=(ALL) NOPASSWD:ALL
- groups: [cloud-users, secret]
- lock_passwd: true
- - name: cloudy
- gecos: Magic Cloud App Daemon User
- inactive: '5'
- system: true
-collect_scripts:
- group_ubuntu: |
- #!/bin/bash
- getent group ubuntu
- group_cloud_users: |
- #!/bin/bash
- getent group cloud-users
- user_ubuntu: |
- #!/bin/bash
- getent passwd ubuntu
- user_foobar: |
- #!/bin/bash
- getent passwd foobar
- user_barfoo: |
- #!/bin/bash
- getent passwd barfoo
- user_cloudy: |
- #!/bin/bash
- getent passwd cloudy
- root_groups: |
- #!/bin/bash
- groups root
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/install_arbitrary_packages.py b/tests/cloud_tests/testcases/examples/install_arbitrary_packages.py
deleted file mode 100644
index df133844..00000000
--- a/tests/cloud_tests/testcases/examples/install_arbitrary_packages.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestInstall(base.CloudTestCase):
- """Example cloud-config test."""
-
- def test_htop(self):
- """Verify htop installed."""
- out = self.get_data_file('htop')
- self.assertEqual(1, int(out))
-
- def test_tree(self):
- """Verify tree installed."""
-        out = self.get_data_file('tree')
- self.assertEqual(1, int(out))
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/install_arbitrary_packages.yaml b/tests/cloud_tests/testcases/examples/install_arbitrary_packages.yaml
deleted file mode 100644
index d3980228..00000000
--- a/tests/cloud_tests/testcases/examples/install_arbitrary_packages.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# From cloud config examples on cloudinit.readthedocs.io
-#
-# 2016-11-17: Disabled as covered by module based tests
-#
-enabled: False
-cloud_config: |
- #cloud-config
- packages:
- - htop
- - tree
-collect_scripts:
- htop: |
- #!/bin/bash
- dpkg -l | grep htop | wc -l
- tree: |
- #!/bin/bash
- dpkg -l | grep tree | wc -l
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/install_run_chef_recipes.py b/tests/cloud_tests/testcases/examples/install_run_chef_recipes.py
deleted file mode 100644
index 4ec26b8f..00000000
--- a/tests/cloud_tests/testcases/examples/install_run_chef_recipes.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestChefExample(base.CloudTestCase):
- """Test chef module."""
-
- def test_chef_basic(self):
- """Test chef installed."""
- out = self.get_data_file('chef_installed')
- self.assertIn('install ok', out)
-
- # FIXME: Add more tests, and/or replace with comprehensive module tests
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/install_run_chef_recipes.yaml b/tests/cloud_tests/testcases/examples/install_run_chef_recipes.yaml
deleted file mode 100644
index 68ca95b5..00000000
--- a/tests/cloud_tests/testcases/examples/install_run_chef_recipes.yaml
+++ /dev/null
@@ -1,104 +0,0 @@
-#
-# From cloud config examples on cloudinit.readthedocs.io
-#
-# 2017-03-31: Disabled as depends on third party apt repository
-#
-enabled: False
-cloud_config: |
- #cloud-config
- # Key from https://packages.chef.io/chef.asc
- apt:
- sources:
- source1:
- source: "deb http://packages.chef.io/repos/apt/stable $RELEASE main"
- key: |
- -----BEGIN PGP PUBLIC KEY BLOCK-----
- Version: GnuPG v1.4.12 (Darwin)
- Comment: GPGTools - http://gpgtools.org
-
- mQGiBEppC7QRBADfsOkZU6KZK+YmKw4wev5mjKJEkVGlus+NxW8wItX5sGa6kdUu
- twAyj7Yr92rF+ICFEP3gGU6+lGo0Nve7KxkN/1W7/m3G4zuk+ccIKmjp8KS3qn99
- dxy64vcji9jIllVa+XXOGIp0G8GEaj7mbkixL/bMeGfdMlv8Gf2XPpp9vwCgn/GC
- JKacfnw7MpLKUHOYSlb//JsEAJqao3ViNfav83jJKEkD8cf59Y8xKia5OpZqTK5W
- ShVnNWS3U5IVQk10ZDH97Qn/YrK387H4CyhLE9mxPXs/ul18ioiaars/q2MEKU2I
- XKfV21eMLO9LYd6Ny/Kqj8o5WQK2J6+NAhSwvthZcIEphcFignIuobP+B5wNFQpe
- DbKfA/0WvN2OwFeWRcmmd3Hz7nHTpcnSF+4QX6yHRF/5BgxkG6IqBIACQbzPn6Hm
- sMtm/SVf11izmDqSsQptCrOZILfLX/mE+YOl+CwWSHhl+YsFts1WOuh1EhQD26aO
- Z84HuHV5HFRWjDLw9LriltBVQcXbpfSrRP5bdr7Wh8vhqJTPjrQnT3BzY29kZSBQ
- YWNrYWdlcyA8cGFja2FnZXNAb3BzY29kZS5jb20+iGAEExECACAFAkppC7QCGwMG
- CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRApQKupg++Caj8sAKCOXmdG36gWji/K
- +o+XtBfvdMnFYQCfTCEWxRy2BnzLoBBFCjDSK6sJqCu0IENIRUYgUGFja2FnZXMg
- PHBhY2thZ2VzQGNoZWYuaW8+iGIEExECACIFAlQwYFECGwMGCwkIBwMCBhUIAgkK
- CwQWAgMBAh4BAheAAAoJEClAq6mD74JqX94An26z99XOHWpLN8ahzm7cp13t4Xid
- AJ9wVcgoUBzvgg91lKfv/34cmemZn7kCDQRKaQu0EAgAg7ZLCVGVTmLqBM6njZEd
- Zbv+mZbvwLBSomdiqddE6u3eH0X3GuwaQfQWHUVG2yedyDMiG+EMtCdEeeRebTCz
- SNXQ8Xvi22hRPoEsBSwWLZI8/XNg0n0f1+GEr+mOKO0BxDB2DG7DA0nnEISxwFkK
- OFJFebR3fRsrWjj0KjDxkhse2ddU/jVz1BY7Nf8toZmwpBmdozETMOTx3LJy1HZ/
- Te9FJXJMUaB2lRyluv15MVWCKQJro4MQG/7QGcIfrIZNfAGJ32DDSjV7/YO+IpRY
- IL4CUBQ65suY4gYUG4jhRH6u7H1p99sdwsg5OIpBe/v2Vbc/tbwAB+eJJAp89Zeu
- twADBQf/ZcGoPhTGFuzbkcNRSIz+boaeWPoSxK2DyfScyCAuG41CY9+g0HIw9Sq8
- DuxQvJ+vrEJjNvNE3EAEdKl/zkXMZDb1EXjGwDi845TxEMhhD1dDw2qpHqnJ2mtE
- WpZ7juGwA3sGhi6FapO04tIGacCfNNHmlRGipyq5ZiKIRq9mLEndlECr8cwaKgkS
- 0wWu+xmMZe7N5/t/TK19HXNh4tVacv0F3fYK54GUjt2FjCQV75USnmNY4KPTYLXA
- dzC364hEMlXpN21siIFgB04w+TXn5UF3B4FfAy5hevvr4DtV4MvMiGLu0oWjpaLC
- MpmrR3Ny2wkmO0h+vgri9uIP06ODWIhJBBgRAgAJBQJKaQu0AhsMAAoJEClAq6mD
- 74Jq4hIAoJ5KrYS8kCwj26SAGzglwggpvt3CAJ0bekyky56vNqoegB+y4PQVDv4K
- zA==
- =IxPr
- -----END PGP PUBLIC KEY BLOCK-----
-
- chef:
-
- # Valid values are 'gems' and 'packages' and 'omnibus'
- install_type: "packages"
-
- # Boolean: run 'install_type' code even if chef-client
- # appears already installed.
- force_install: false
-
- # Chef settings
- server_url: "https://chef.yourorg.com:4000"
-
- # Node Name
- # Defaults to the instance-id if not present
- node_name: "your-node-name"
-
- # Environment
- # Defaults to '_default' if not present
- environment: "production"
-
- # Default validation name is chef-validator
- validation_name: "yourorg-validator"
- # if validation_cert's value is "system" then it is expected
- # that the file already exists on the system.
- validation_cert: |
- -----BEGIN RSA PRIVATE KEY-----
- YOUR-ORGS-VALIDATION-KEY-HERE
- -----END RSA PRIVATE KEY-----
-
- # A run list for a first boot json
- run_list:
- - "recipe[apache2]"
- - "role[db]"
-
- # Specify a list of initial attributes used by the cookbooks
- initial_attributes:
- apache:
- prefork:
- maxclients: 100
- keepalive: "off"
-
- # if install_type is 'omnibus', change the url to download
- omnibus_url: "https://www.opscode.com/chef/install.sh"
-
-
- # Capture all subprocess output into a logfile
- # Useful for troubleshooting cloud-init issues
- output: {all: '| tee -a /var/log/cloud-init-output.log'}
-
-collect_scripts:
- chef_installed: |
- #!/bin/sh
- dpkg-query -W -f '${Status}\n' chef
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/run_apt_upgrade.py b/tests/cloud_tests/testcases/examples/run_apt_upgrade.py
deleted file mode 100644
index 744e49cb..00000000
--- a/tests/cloud_tests/testcases/examples/run_apt_upgrade.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestUpgrade(base.CloudTestCase):
- """Example cloud-config test."""
-
- def test_upgrade(self):
- """Test upgrade exists in apt history."""
- out = self.get_data_file('cloud-init.log')
- self.assertIn(
- '[CLOUDINIT] util.py[DEBUG]: apt-upgrade '
- '[eatmydata apt-get --option=Dpkg::Options::=--force-confold '
- '--option=Dpkg::options::=--force-unsafe-io --assume-yes --quiet '
- 'dist-upgrade] took', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/run_apt_upgrade.yaml b/tests/cloud_tests/testcases/examples/run_apt_upgrade.yaml
deleted file mode 100644
index 2b7eae4c..00000000
--- a/tests/cloud_tests/testcases/examples/run_apt_upgrade.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-#
-# From cloud config examples on cloudinit.readthedocs.io
-#
-# 2016-11-17: Disabled as covered by module based tests
-#
-enabled: False
-cloud_config: |
- #cloud-config
- package_upgrade: true
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/run_commands.py b/tests/cloud_tests/testcases/examples/run_commands.py
deleted file mode 100644
index 01d5d4fc..00000000
--- a/tests/cloud_tests/testcases/examples/run_commands.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestRunCmd(base.CloudTestCase):
- """Example cloud-config test."""
-
- def test_run_cmd(self):
- """Test run command worked."""
- out = self.get_data_file('run_cmd')
- self.assertIn('cloud-init run cmd test', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/run_commands.yaml b/tests/cloud_tests/testcases/examples/run_commands.yaml
deleted file mode 100644
index f80eb8ce..00000000
--- a/tests/cloud_tests/testcases/examples/run_commands.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-#
-# From cloud config examples on cloudinit.readthedocs.io
-#
-# 2016-11-17: Disabled as covered by module based tests
-#
-enabled: False
-cloud_config: |
- #cloud-config
- runcmd:
- - echo cloud-init run cmd test > /var/tmp/run_cmd
-collect_scripts:
- run_cmd: |
- #!/bin/bash
- cat /var/tmp/run_cmd
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/run_commands_first_boot.py b/tests/cloud_tests/testcases/examples/run_commands_first_boot.py
deleted file mode 100644
index 3f3d8f84..00000000
--- a/tests/cloud_tests/testcases/examples/run_commands_first_boot.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestBootCmd(base.CloudTestCase):
- """Example cloud-config test."""
-
- def test_bootcmd_host(self):
- """Test boot command worked."""
- out = self.get_data_file('hosts')
- self.assertIn('192.168.1.130 us.archive.ubuntu.com', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/run_commands_first_boot.yaml b/tests/cloud_tests/testcases/examples/run_commands_first_boot.yaml
deleted file mode 100644
index 7bd803db..00000000
--- a/tests/cloud_tests/testcases/examples/run_commands_first_boot.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-#
-# From cloud config examples on cloudinit.readthedocs.io
-#
-# 2016-11-17: Disabled as covered by module based tests
-#
-enabled: False
-cloud_config: |
- #cloud-config
- bootcmd:
- - echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts
-collect_scripts:
- hosts: |
- #!/bin/bash
- cat /etc/hosts
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/setup_run_puppet.yaml b/tests/cloud_tests/testcases/examples/setup_run_puppet.yaml
deleted file mode 100644
index e366c042..00000000
--- a/tests/cloud_tests/testcases/examples/setup_run_puppet.yaml
+++ /dev/null
@@ -1,55 +0,0 @@
-#
-# From cloud config examples on cloudinit.readthedocs.io
-#
-# 2016-11-17: Disabled as test suite fails this long running test currently
-#
-enabled: False
-cloud_config: |
- #cloud-config
- puppet:
- # Every key present in the conf object will be added to puppet.conf:
- # [name]
- # subkey=value
- #
- # For example the configuration below will have the following section
- # added to puppet.conf:
- # [puppetd]
- # server=puppetmaster.example.org
- # certname=i-0123456.ip-X-Y-Z.cloud.internal
- #
-      # The puppetmaster ca certificate will be available in
- # /var/lib/puppet/ssl/certs/ca.pem
- conf:
- agent:
- server: "puppetmaster.example.org"
- # certname supports substitutions at runtime:
- # %i: instanceid
- # Example: i-0123456
- # %f: fqdn of the machine
- # Example: ip-X-Y-Z.cloud.internal
- #
- # NB: the certname will automatically be lowercased as required by puppet
- certname: "%i.%f"
- # ca_cert is a special case. It won't be added to puppet.conf.
- # It holds the puppetmaster certificate in pem format.
- # It should be a multi-line string (using the | yaml notation for
- # multi-line strings).
- # The puppetmaster certificate is located in
- # /var/lib/puppet/ssl/ca/ca_crt.pem on the puppetmaster host.
- #
- ca_cert: |
- -----BEGIN CERTIFICATE-----
- MIICCTCCAXKgAwIBAgIBATANBgkqhkiG9w0BAQUFADANMQswCQYDVQQDDAJjYTAe
- Fw0xMDAyMTUxNzI5MjFaFw0xNTAyMTQxNzI5MjFaMA0xCzAJBgNVBAMMAmNhMIGf
- MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCu7Q40sm47/E1Pf+r8AYb/V/FWGPgc
- b014OmNoX7dgCxTDvps/h8Vw555PdAFsW5+QhsGr31IJNI3kSYprFQcYf7A8tNWu
- 1MASW2CfaEiOEi9F1R3R4Qlz4ix+iNoHiUDTjazw/tZwEdxaQXQVLwgTGRwVa+aA
- qbutJKi93MILLwIDAQABo3kwdzA4BglghkgBhvhCAQ0EKxYpUHVwcGV0IFJ1Ynkv
- T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDwYDVR0TAQH/BAUwAwEB/zAd
- BgNVHQ4EFgQUu4+jHB+GYE5Vxo+ol1OAhevspjAwCwYDVR0PBAQDAgEGMA0GCSqG
- SIb3DQEBBQUAA4GBAH/rxlUIjwNb3n7TXJcDJ6MMHUlwjr03BDJXKb34Ulndkpaf
- +GAlzPXWa7bO908M9I8RnPfvtKnteLbvgTK+h+zX1XCty+S2EQWk29i2AdoqOTxb
- hppiGMp0tT5Havu4aceCXiy2crVcudj3NFciy8X66SoECemW9UYDCb9T5D0d
- -----END CERTIFICATE-----
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/writing_out_arbitrary_files.py b/tests/cloud_tests/testcases/examples/writing_out_arbitrary_files.py
deleted file mode 100644
index 7bd520f6..00000000
--- a/tests/cloud_tests/testcases/examples/writing_out_arbitrary_files.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestWriteFiles(base.CloudTestCase):
- """Example cloud-config test."""
-
- def test_b64(self):
- """Test b64 encoded file reads as ascii."""
- out = self.get_data_file('file_b64')
- self.assertIn('ASCII text', out)
-
- def test_binary(self):
- """Test binary file reads as executable."""
- out = self.get_data_file('file_binary')
- self.assertIn('ELF 64-bit LSB executable, x86-64, version 1', out)
-
- def test_gzip(self):
- """Test gzip file shows up as a shell script."""
- out = self.get_data_file('file_gzip')
- self.assertIn('POSIX shell script, ASCII text executable', out)
-
- def test_text(self):
- """Test text shows up as ASCII text."""
- out = self.get_data_file('file_text')
- self.assertIn('ASCII text', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/examples/writing_out_arbitrary_files.yaml b/tests/cloud_tests/testcases/examples/writing_out_arbitrary_files.yaml
deleted file mode 100644
index 6f78f994..00000000
--- a/tests/cloud_tests/testcases/examples/writing_out_arbitrary_files.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-#
-# From cloud config examples on cloudinit.readthedocs.io
-#
-# 2016-11-17: Disabled as covered by module-based tests
-#
-enabled: False
-cloud_config: |
- #cloud-config
- write_files:
- - encoding: b64
- content: CiMgVGhpcyBmaWxlIGNvbnRyb2xzIHRoZSBzdGF0ZSBvZiBTRUxpbnV4
- owner: root:root
- path: /root/file_b64
- permissions: '0644'
- - content: |
- # My new /root/file_text
-
- SMBDOPTIONS="-D"
- path: /root/file_text
- - content: !!binary |
- f0VMRgIBAQAAAAAAAAAAAAIAPgABAAAAwARAAAAAAABAAAAAAAAAAJAVAAAAAAAAAAAAAEAAOAAI
- AEAAHgAdAAYAAAAFAAAAQAAAAAAAAABAAEAAAAAAAEAAQAAAAAAAwAEAAAAAAADAAQAAAAAAAAgA
- AAAAAAAAAwAAAAQAAAAAAgAAAAAAAAACQAAAAAAAAAJAAAAAAAAcAAAAAAAAABwAAAAAAAAAAQAA
- path: /root/file_binary
- permissions: '0555'
- - encoding: gzip
- content: !!binary |
- H4sIAIDb/U8C/1NW1E/KzNMvzuBKTc7IV8hIzcnJVyjPL8pJ4QIA6N+MVxsAAAA=
- path: /root/file_gzip
- permissions: '0755'
-collect_scripts:
- file_b64: |
- #!/bin/bash
- file /root/file_b64
- file_text: |
- #!/bin/bash
- file /root/file_text
- file_binary: |
- #!/bin/bash
- file /root/file_binary
- file_gzip: |
- #!/bin/bash
- file /root/file_gzip
-
-# vi: ts=4 expandtab
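
The deleted write_files example above relies on b64 and gzip encodings for file content. A minimal sketch, independent of cloud-init itself, of how such encoded content values can be produced; the sample text is an assumption, not the exact payload from the deleted test:

    # Illustrative only: producing the encoded 'content' values used by
    # write_files.  The sample text below is an assumption.
    import base64
    import gzip

    text = b"# This file controls the state of SELinux\n"

    # encoding: b64 -- plain base64 of the file body
    b64_content = base64.b64encode(text).decode()

    # encoding: gzip with !!binary -- gzip-compress, then base64 for embedding in YAML
    gzip_content = base64.b64encode(gzip.compress(text)).decode()

    print(b64_content)
    print(gzip_content)
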
diff --git a/tests/cloud_tests/testcases/main/README.md b/tests/cloud_tests/testcases/main/README.md
deleted file mode 100644
index 60346063..00000000
--- a/tests/cloud_tests/testcases/main/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
-# Main Functionality Test Configs
-
-## purpose
-Test main features and config options of cloud-init, such as logging, output
-redirection, early init, and integration with the init system
-
-## structure
-Should have one or more test configs for all main cloud-init output and logging
-options, and basic functionality test cases
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/main/__init__.py b/tests/cloud_tests/testcases/main/__init__.py
deleted file mode 100644
index 0a592637..00000000
--- a/tests/cloud_tests/testcases/main/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Test verifiers for cloud-init main features.
-
-See configs/main/README.md for more information
-"""
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/main/command_output_simple.py b/tests/cloud_tests/testcases/main/command_output_simple.py
deleted file mode 100644
index 80a2c8d7..00000000
--- a/tests/cloud_tests/testcases/main/command_output_simple.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestCommandOutputSimple(base.CloudTestCase):
- """Test functionality of simple output redirection."""
-
- expected_warnings = ('Stdout, stderr changing to',)
-
- def test_output_file(self):
- """Ensure that the output file is not empty and has all stages."""
- data = self.get_data_file('cloud-init-test-output')
- self.assertNotEqual(len(data), 0, "specified log empty")
- self.assertEqual(self.get_config_entry('final_message'),
- data.splitlines()[-1].strip())
- # TODO: need to test that all stages redirected here
-
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/main/command_output_simple.yaml b/tests/cloud_tests/testcases/main/command_output_simple.yaml
deleted file mode 100644
index 08ca8940..00000000
--- a/tests/cloud_tests/testcases/main/command_output_simple.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# Test functionality of simple output redirection
-#
-cloud_config: |
- #cloud-config
- output: { all: "| tee -a /var/log/cloud-init-test-output" }
- final_message: "should be last line in cloud-init-test-output file"
-collect_scripts:
- cloud-init-test-output: |
- #!/bin/bash
- cat /var/log/cloud-init-test-output
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/README.md b/tests/cloud_tests/testcases/modules/README.md
deleted file mode 100644
index d66101f2..00000000
--- a/tests/cloud_tests/testcases/modules/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
-# Module Test Configs
-
-## Purpose
-Test functionality of cloud config modules. See
-[here](https://cloudinit.readthedocs.io/en/latest/topics/modules.html) for
-a full list.
-
-## Structure
-Should have one or more test configs for each module in cloudinit/config/. The
-name of the test should indicate which module the config is verifying.
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/TODO.md b/tests/cloud_tests/testcases/modules/TODO.md
deleted file mode 100644
index 9513cb2d..00000000
--- a/tests/cloud_tests/testcases/modules/TODO.md
+++ /dev/null
@@ -1,95 +0,0 @@
-# TODO
-
-The following lists completely or partially missing modules. If a module is
-listed with nothing below it, no work has been completed on that
-module. If there is a list below the module name, it is the remaining
-identified work.
-
-## apt_configure
-
- * apt_get_wrapper
- * What does this do? How to use it?
- * apt_get_command
- * To specify a different 'apt-get' command, set 'apt_get_command'.
- This must be a list, and the subcommand (update, upgrade) is appended to it.
- * Modify default and verify the options got passed correctly.
- * preserve sources
- * TBD
-
-## chef
-2016-11-17: Tests took > 60 seconds and test framework times out currently.
-
-## disable EC2 metadata
-
-## disk setup
-
-## emit upstart
-
-## fan
-
-## growpart
-
-## grub dpkg
-
-## landscape
-2016-11-17: Module is not working
-
-## lxd
-2016-11-17: Need a zfs backed test written
-
-## mcollective
-
-## migrator
-
-## mounts
-
-## phone home
-
-## power state change
-
-## puppet
-2016-11-17: Tests took > 60 seconds and test framework times out currently.
-
-## resizefs
-
-## resolv conf
-2016-11-17: Issues with changing resolv.conf and lxc backend.
-
-## redhat subscription
-2016-11-17: Need RH support in test framework.
-
-## rightscale userdata
-2016-11-17: Specific to RightScale cloud environment.
-
-## rsyslog
-
-## scripts per boot
-Not applicable to write a test for this as it specifies when something should be run.
-
-## scripts per instance
-Not applicable to write a test for this as it specifies when something should be run.
-
-## scripts per once
-Not applicable to write a test for this as it specifies when something should be run.
-
-## scripts user
-Not applicable to write a test for this as it specifies when something should be run.
-
-## scripts vendor
-Not applicable to write a test for this as it specifies when something should be run.
-
-## snap
-2019-12-19: Need to investigate
-
-## spacewalk
-
-## ssh authkey fingerprints
-The authkey_hash key does not appear to work. The default claims to be md5; however, syslog only shows sha256.
-
-## update etc hosts
-2016-11-17: Issues with changing /etc/hosts and lxc backend.
-
-## yum add repo
-2016-11-17: Need RH support in test framework.
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/__init__.py b/tests/cloud_tests/testcases/modules/__init__.py
deleted file mode 100644
index 6ab8114d..00000000
--- a/tests/cloud_tests/testcases/modules/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Test verifiers for cloud-init cc modules.
-
-See configs/modules/README.md for more information
-"""
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_conf.py b/tests/cloud_tests/testcases/modules/apt_configure_conf.py
deleted file mode 100644
index 3bf93447..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_conf.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestAptconfigureConf(base.CloudTestCase):
- """Test apt-configure module."""
-
- def test_apt_conf_assumeyes(self):
- """Test config assumes true."""
- out = self.get_data_file('94cloud-init-config')
- self.assertIn('Assume-Yes "true";', out)
-
- def test_apt_conf_fixbroken(self):
- """Test config fixes broken."""
- out = self.get_data_file('94cloud-init-config')
- self.assertIn('Fix-Broken "true";', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_conf.yaml b/tests/cloud_tests/testcases/modules/apt_configure_conf.yaml
deleted file mode 100644
index de453000..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_conf.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# Provide a configuration for APT
-#
-required_features:
- - apt
-cloud_config: |
- #cloud-config
- apt:
- conf: |
- APT {
- Get {
- Assume-Yes "true";
- Fix-Broken "true";
- }
- }
-collect_scripts:
- 94cloud-init-config: |
- #!/bin/bash
- cat /etc/apt/apt.conf.d/94cloud-init-config
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_disable_suites.py b/tests/cloud_tests/testcases/modules/apt_configure_disable_suites.py
deleted file mode 100644
index eabe4607..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_disable_suites.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestAptconfigureDisableSuites(base.CloudTestCase):
- """Test apt-configure module."""
-
- def test_empty_sourcelist(self):
- """Test source list is empty."""
- out = self.get_data_file('sources.list')
- self.assertEqual('', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_disable_suites.yaml b/tests/cloud_tests/testcases/modules/apt_configure_disable_suites.yaml
deleted file mode 100644
index 98800673..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_disable_suites.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Disables everything in sources.list
-#
-required_features:
- - apt
- - lsb_release
-cloud_config: |
- #cloud-config
- apt:
- disable_suites:
- - $RELEASE
- - $RELEASE-updates
- - $RELEASE-backports
- - $RELEASE-security
-collect_scripts:
- sources.list: |
- #!/bin/bash
- grep -v '^#' /etc/apt/sources.list | sed '/^\s*$/d'
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_primary.py b/tests/cloud_tests/testcases/modules/apt_configure_primary.py
deleted file mode 100644
index 4950a2ef..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_primary.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestAptconfigurePrimary(base.CloudTestCase):
- """Test apt-configure module."""
-
- def test_ubuntu_sources(self):
- """Test no default Ubuntu entries exist."""
- out = self.get_data_file('sources.list')
- ubuntu_source_count = len(
- [line for line in out.split('\n') if 'archive.ubuntu.com' in line])
- self.assertEqual(0, ubuntu_source_count)
-
- def test_gatech_sources(self):
- """Test GaTech entries exist."""
- out = self.get_data_file('sources.list')
- gatech_source_count = len(
- [line for line in out.split('\n') if 'gtlib.gatech.edu' in line])
- self.assertGreater(gatech_source_count, 0)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_primary.yaml b/tests/cloud_tests/testcases/modules/apt_configure_primary.yaml
deleted file mode 100644
index cc067d4f..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_primary.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-#
-# Set up a custom primary sources.list
-#
-required_features:
- - apt
- - apt_src_cont
-cloud_config: |
- #cloud-config
- apt:
- primary:
- - arches:
- - default
- uri: "http://www.gtlib.gatech.edu/pub/ubuntu-releases/"
-collect_scripts:
- sources.list: |
- #!/bin/bash
- cat /etc/apt/sources.list
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_proxy.py b/tests/cloud_tests/testcases/modules/apt_configure_proxy.py
deleted file mode 100644
index 0c61b6cc..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_proxy.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestAptconfigureProxy(base.CloudTestCase):
- """Test apt-configure module."""
-
- def test_proxy_config(self):
- """Test proxy options added to apt config."""
- out = self.get_data_file('90cloud-init-aptproxy')
- self.assertIn(
- 'Acquire::http::Proxy "http://squid.internal:3128";', out)
- self.assertIn(
- 'Acquire::http::Proxy "http://squid.internal:3128";', out)
- self.assertIn(
- 'Acquire::ftp::Proxy "ftp://squid.internal:3128";', out)
- self.assertIn(
- 'Acquire::https::Proxy "https://squid.internal:3128";', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_proxy.yaml b/tests/cloud_tests/testcases/modules/apt_configure_proxy.yaml
deleted file mode 100644
index be6c6f81..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_proxy.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-#
-# Set apt proxy
-#
-required_features:
- - apt
-cloud_config: |
- #cloud-config
- apt:
- proxy: "http://squid.internal:3128"
- http_proxy: "http://squid.internal:3128"
- ftp_proxy: "ftp://squid.internal:3128"
- https_proxy: "https://squid.internal:3128"
-collect_scripts:
- 90cloud-init-aptproxy: |
- #!/bin/bash
- cat /etc/apt/apt.conf.d/90cloud-init-aptproxy
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_security.py b/tests/cloud_tests/testcases/modules/apt_configure_security.py
deleted file mode 100644
index 7d7e2585..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_security.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestAptconfigureSecurity(base.CloudTestCase):
- """Test apt-configure module."""
-
- def test_security_mirror(self):
- """Test security lines added and uncommented in source.list."""
- out = self.get_data_file('sources.list')
- self.assertEqual(6, int(out))
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_security.yaml b/tests/cloud_tests/testcases/modules/apt_configure_security.yaml
deleted file mode 100644
index 83dd51df..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_security.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-#
-# Add security to sources.list
-#
-required_features:
- - apt
- - ubuntu_repos
-cloud_config: |
- #cloud-config
- apt:
- security:
- - arches:
- - default
-collect_scripts:
- sources.list: |
- #!/bin/bash
- grep -c security.ubuntu.com /etc/apt/sources.list
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_sources_key.py b/tests/cloud_tests/testcases/modules/apt_configure_sources_key.py
deleted file mode 100644
index d9061f3c..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_sources_key.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestAptconfigureSourcesKey(base.CloudTestCase):
- """Test apt-configure module."""
-
- def test_apt_key_list(self):
- """Test key list updated."""
- out = self.get_data_file('apt_key_list')
- self.assertIn(
- '1FF0 D853 5EF7 E719 E5C8 1B9C 083D 06FB E4D3 04DF', out)
- self.assertIn('Launchpad PPA for cloud init development team', out)
-
- def test_source_list(self):
- """Test source.list updated."""
- out = self.get_data_file('sources.list')
- self.assertIn(
- 'http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_sources_key.yaml b/tests/cloud_tests/testcases/modules/apt_configure_sources_key.yaml
deleted file mode 100644
index bde9398a..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_sources_key.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-#
-# Add a sources.list entry with a given key (Debian Jessie)
-#
-required_features:
- - apt
- - lsb_release
-cloud_config: |
- #cloud-config
- apt:
- sources:
- source1:
- source: "deb http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu $RELEASE main"
- key: |
- -----BEGIN PGP PUBLIC KEY BLOCK-----
- Version: SKS 1.1.6
- Comment: Hostname: keyserver.ubuntu.com
-
- mQINBFbZRUIBEAC+A0PIKYBP9kLC4hQtRrffRS11uLo8/BdtmOdrlW0hpPHzCfKnjR3tvSEI
- lqPHG1QrrjAXKZDnZMRz+h/px7lUztvytGzHPSJd5ARUzAyjyRezUhoJ3VSCxrPqx62avuWf
- RfoJaIeHfDehL5/dTVkyiWxfVZ369ZX6JN2AgLsQTeybTQ75+2z0xPrrhnGmgh6g0qTYcAaq
- M5ONOGiqeSBX/Smjh6ALy5XkhUiFGLsI7Yluf6XSICY/x7gd6RAfgSIQrUTNMoS1sqhT4aot
- +xvOfQy8ySkfAK4NddXql6E/+ZqTmBY/Lr0YklFBy8jGT+UysfiIznPMIwbmgq5Li7BtDDtX
- b8Uyi4edPpjtextezfXYn4NVIpPL5dPZS/FXh4HpzyH0pYCfrH4QDGA7i52AGmhpiOFjJMo6
- N33sdjZHOH/2Vyp+QZaQnsdUAi1N4M6c33tQbpIScn1SY+El8z5JDA4PBzkw8HpLCi1gGoa6
- V4kfbWqXXbGAJFkLkP/vc4+pY9axOlmCkJg7xCPwhI75y1cONgovhz+BEXOzolh5KZuGbGbj
- xe0wva5DLBeIg7EQFf+99pOS7Syby3Xpm6ZbswEFV0cllK4jf/QMjtfInxobuMoI0GV0bE5l
- WlRtPCK5FnbHwxi0wPNzB/5fwzJ77r6HgPrR0OkT0lWmbUyoOQARAQABtC1MYXVuY2hwYWQg
- UFBBIGZvciBjbG91ZCBpbml0IGRldmVsb3BtZW50IHRlYW2JAjgEEwECACIFAlbZRUICGwMG
- CwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEAg9Bvvk0wTfHfcP/REK5N2s1JYc69qEa9ZN
- o6oi+A7l6AYw+ZY88O5TJe7F9otv5VXCIKSUT0Vsepjgf0mtXAgf/sb2lsJn/jp7tzgov3YH
- vSrkTkRydz8xcA87gwQKePuvTLxQpftF4flrBxgSueIn5O/tPrBOxLz7EVYBc78SKg9aj9L2
- yUp+YuNevlwfZCTYeBb9r3FHaab2HcgkwqYch66+nKYfwiLuQ9NzXXm0Wn0JcEQ6pWvJscbj
- C9BdawWovfvMK5/YLfI6Btm7F4mIpQBdhSOUp/YXKmdvHpmwxMCN2QhqYK49SM7qE9aUDbJL
- arppSEBtlCLWhRBZYLTUna+BkuQ1bHz4St++XTR49Qd7vDERALpApDjB2dxPfMiBzCMwQQyq
- uy13exU8o2ETLg+dZSLfDTzrBNsBFmXlw8WW17nTISYdKeGKL+QdlUjpzdwUMMzHhAO8SmMH
- zjeSlDSRMXBJFAFSbCl7EwmMKa3yVX0zInT91fNllZ3iatAmtVdqVH/BFQfTIMH2ET7A8WzJ
- ZzVSuMRhqoKdr5AMcHuJGPUoVkVJHQA+NNvEiXSysF3faL7jmKapmUwrhpYYX2H8pf+VMu2e
- cLflKTI28dl+ZQ4Pl/aVsxrti/pzhdYy05Sn5ddtySyIkvo8L1cU5MWpbvSlFPkTstBUDLBf
- pb0uBy+g0oxJQg15
- =uy53
- -----END PGP PUBLIC KEY BLOCK-----
-collect_scripts:
- sources.list: |
- #!/bin/bash
- cat /etc/apt/sources.list.d/source1.list
- apt_key_list: |
- #!/bin/bash
- apt-key finger
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.py b/tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.py
deleted file mode 100644
index ddc86174..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestAptconfigureSourcesKeyserver(base.CloudTestCase):
- """Test apt-configure module."""
-
- def test_apt_key_list(self):
- """Test specific key added."""
- out = self.get_data_file('apt_key_list')
- self.assertIn(
- '1FF0 D853 5EF7 E719 E5C8 1B9C 083D 06FB E4D3 04DF', out)
- self.assertIn('Launchpad PPA for cloud init development team', out)
-
- def test_source_list(self):
- """Test source.list updated."""
- out = self.get_data_file('sources.list')
- self.assertIn(
- 'http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.yaml b/tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.yaml
deleted file mode 100644
index 25088135..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-# Add a sources.list entry with a key from a keyserver
-#
-required_features:
- - apt
- - lsb_release
-cloud_config: |
- #cloud-config
- apt:
- sources:
- source1:
- keyid: 1FF0D8535EF7E719E5C81B9C083D06FBE4D304DF
- keyserver: keyserver.ubuntu.com
- source: "deb http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu $RELEASE main"
-collect_scripts:
- sources.list: |
- #!/bin/bash
- cat /etc/apt/sources.list.d/source1.list
- apt_key_list: |
- #!/bin/bash
- apt-key finger
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_sources_list.py b/tests/cloud_tests/testcases/modules/apt_configure_sources_list.py
deleted file mode 100644
index cf84e056..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_sources_list.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestAptconfigureSourcesList(base.CloudTestCase):
- """Test apt-configure module."""
-
- def test_sources_list(self):
- """Test sources.list includes sources."""
- out = self.get_data_file('sources.list')
-
- # Verify we have 6 entries
- self.assertEqual(6, len(out.rstrip().split('\n')))
-
- # Verify the keys generated the list correctly
- self.assertRegex(out, r'deb http:\/\/archive.ubuntu.com\/ubuntu '
- '[a-z].* main restricted')
- self.assertRegex(out, r'deb-src http:\/\/archive.ubuntu.com\/ubuntu '
- '[a-z].* main restricted')
- self.assertRegex(out, r'deb http:\/\/archive.ubuntu.com\/ubuntu '
- '[a-z].* universe restricted')
- self.assertRegex(out, r'deb-src http:\/\/archive.ubuntu.com\/ubuntu '
- '[a-z].* universe restricted')
- self.assertRegex(out, r'deb http:\/\/security.ubuntu.com\/ubuntu '
- '[a-z].*security multiverse')
- self.assertRegex(out, r'deb-src http:\/\/security.ubuntu.com\/ubuntu '
- '[a-z].*security multiverse')
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml b/tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml
deleted file mode 100644
index 87e470c1..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-# Generate a sources.list
-#
-required_features:
- - apt
- - lsb_release
-cloud_config: |
- #cloud-config
- apt:
- primary:
- - arches: [default]
- uri: http://archive.ubuntu.com/ubuntu
- security:
- - arches: [default]
- uri: http://security.ubuntu.com/ubuntu
- sources_list: |
- deb $MIRROR $RELEASE main restricted
- deb-src $MIRROR $RELEASE main restricted
- deb $PRIMARY $RELEASE universe restricted
- deb-src $PRIMARY $RELEASE universe restricted
- deb $SECURITY $RELEASE-security multiverse
- deb-src $SECURITY $RELEASE-security multiverse
-collect_scripts:
- sources.list: |
- #!/bin/bash
- cat /etc/apt/sources.list
-
-# vi: ts=4 expandtab
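
The deleted sources_list template above uses $MIRROR, $PRIMARY, $SECURITY, and $RELEASE placeholders. A rough, partial emulation of that substitution with Python's string.Template follows; the mirror URLs and release name are assumptions, not the values cloud-init resolves:

    # Rough emulation of the placeholder substitution in the sources_list
    # template above; URLs and the release name here are assumptions.
    from string import Template

    sources_list = Template(
        "deb $MIRROR $RELEASE main restricted\n"
        "deb $SECURITY $RELEASE-security multiverse\n"
    )

    print(sources_list.safe_substitute(
        MIRROR="http://archive.ubuntu.com/ubuntu",
        SECURITY="http://security.ubuntu.com/ubuntu",
        RELEASE="focal",  # hypothetical release name
    ))
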
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.py b/tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.py
deleted file mode 100644
index dfbdeadf..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestAptconfigureSourcesPPA(base.CloudTestCase):
- """Test apt-configure module."""
-
- def test_ppa(self):
- """Test specific ppa added."""
- out = self.get_data_file('sources.list')
- self.assertIn(
- 'http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu', out)
-
- def test_ppa_key(self):
- """Test ppa key added."""
- out = self.get_data_file('apt-key')
- self.assertIn(
- '1FF0 D853 5EF7 E719 E5C8 1B9C 083D 06FB E4D3 04DF', out)
- self.assertIn('Launchpad PPA for cloud init development team', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.yaml b/tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.yaml
deleted file mode 100644
index b997bcfb..00000000
--- a/tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-#
-# Add a PPA to sources.list
-#
-# NOTE: on older ubuntu releases the sources file added is named
-# 'cloud-init-dev-test-archive-trusty', without 'ubuntu' in the middle
-required_features:
- - apt
- - ppa
- - ppa_file_name
-cloud_config: |
- #cloud-config
- apt:
- sources:
- source1:
- keyid: 0165013E
- keyserver: keyserver.ubuntu.com
- source: "ppa:cloud-init-dev/test-archive"
-collect_scripts:
- sources.list: |
- #!/bin/bash
- cat /etc/apt/sources.list.d/cloud-init-dev-ubuntu-test-archive-*.list
- apt-key: |
- #!/bin/bash
- apt-key finger
- sources_full: |
- #!/bin/bash
- cat /etc/apt/sources.list
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_pipelining_disable.py b/tests/cloud_tests/testcases/modules/apt_pipelining_disable.py
deleted file mode 100644
index c98eedef..00000000
--- a/tests/cloud_tests/testcases/modules/apt_pipelining_disable.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestAptPipeliningDisable(base.CloudTestCase):
- """Test apt-pipelining module."""
-
- def test_disable_pipelining(self):
- """Test pipelining disabled."""
- out = self.get_data_file('90cloud-init-pipelining')
- self.assertIn('Acquire::http::Pipeline-Depth "0";', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml b/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml
deleted file mode 100644
index 22a31dc4..00000000
--- a/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# Disable apt pipelining
-#
-required_features:
- - apt
-cloud_config: |
- #cloud-config
- apt_pipelining: false
-collect_scripts:
- 90cloud-init-pipelining: |
- #!/bin/bash
- cat /etc/apt/apt.conf.d/90cloud-init-pipelining
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_pipelining_os.py b/tests/cloud_tests/testcases/modules/apt_pipelining_os.py
deleted file mode 100644
index 2b940a66..00000000
--- a/tests/cloud_tests/testcases/modules/apt_pipelining_os.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestAptPipeliningOS(base.CloudTestCase):
- """Test apt-pipelining module."""
-
- def test_os_pipelining(self):
- """test 'os' settings does not write apt config file."""
- out = self.get_data_file('90cloud-init-pipelining_not_written')
- self.assertEqual(0, int(out))
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml b/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml
deleted file mode 100644
index 86d5220b..00000000
--- a/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# Set apt pipelining value to OS, no conf written
-#
-required_features:
- - apt
-cloud_config: |
- #cloud-config
- apt_pipelining: os
-collect_scripts:
- 90cloud-init-pipelining_not_written: |
- #!/bin/bash
- ls /etc/apt/apt.conf.d/90cloud-init-pipelining | wc -l
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/bootcmd.py b/tests/cloud_tests/testcases/modules/bootcmd.py
deleted file mode 100644
index f5b86b03..00000000
--- a/tests/cloud_tests/testcases/modules/bootcmd.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestBootCmd(base.CloudTestCase):
- """Test bootcmd module."""
-
- def test_bootcmd_host(self):
- """Test boot cmd worked."""
- out = self.get_data_file('hosts')
- self.assertIn('192.168.1.130 us.archive.ubuntu.com', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/bootcmd.yaml b/tests/cloud_tests/testcases/modules/bootcmd.yaml
deleted file mode 100644
index 3a73994e..00000000
--- a/tests/cloud_tests/testcases/modules/bootcmd.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# Early boot command
-#
-cloud_config: |
- #cloud-config
- bootcmd:
- - echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts
-collect_scripts:
- hosts: |
- #!/bin/bash
- cat /etc/hosts
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/byobu.py b/tests/cloud_tests/testcases/modules/byobu.py
deleted file mode 100644
index 74d0529a..00000000
--- a/tests/cloud_tests/testcases/modules/byobu.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestByobu(base.CloudTestCase):
- """Test Byobu module."""
-
- def test_byobu_installed(self):
- """Test byobu installed."""
- self.assertPackageInstalled('byobu')
-
- def test_byobu_profile_enabled(self):
- """Test byobu profile.d file exists."""
- out = self.get_data_file('byobu_profile_enabled')
- self.assertIn('/etc/profile.d/Z97-byobu.sh', out)
-
- def test_byobu_launch_exists(self):
- """Test byobu-launch exists."""
- out = self.get_data_file('byobu_launch_exists')
- self.assertIn('/usr/bin/byobu-launch', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/byobu.yaml b/tests/cloud_tests/testcases/modules/byobu.yaml
deleted file mode 100644
index d002a611..00000000
--- a/tests/cloud_tests/testcases/modules/byobu.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-#
-# Install and enable byobu system wide and default user
-#
-required_features:
- - byobu
-cloud_config: |
- #cloud-config
- byobu_by_default: enable
-collect_scripts:
- byobu_profile_enabled: |
- #!/bin/bash
- ls /etc/profile.d/Z97-byobu.sh
- byobu_launch_exists: |
- #!/bin/bash
- which /usr/bin/byobu-launch
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ca_certs.py b/tests/cloud_tests/testcases/modules/ca_certs.py
deleted file mode 100644
index 6b56f639..00000000
--- a/tests/cloud_tests/testcases/modules/ca_certs.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestCaCerts(base.CloudTestCase):
- """Test ca certs module."""
-
- def test_certs_updated(self):
- """Test certs have been updated in /etc/ssl/certs."""
- out = self.get_data_file('cert_links')
- # Bionic update-ca-certificates creates fewer links (Debian #895075)
- unlinked_files = []
- links = {}
- for cert_line in out.splitlines():
- if '->' in cert_line:
- fname, _sep, link = cert_line.split()
- links[fname] = link
- else:
- unlinked_files.append(cert_line)
- self.assertEqual(['ca-certificates.crt'], unlinked_files)
- self.assertEqual('cloud-init-ca-certs.pem', links['a535c1f3.0'])
- self.assertEqual(
- '/usr/share/ca-certificates/cloud-init-ca-certs.crt',
- links['cloud-init-ca-certs.pem'])
-
- def test_cert_installed(self):
- """Test line from our cert exists."""
- out = self.get_data_file('cert')
- self.assertIn('a36c744454555024e7f82edc420fd2c8', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ca_certs.yaml b/tests/cloud_tests/testcases/modules/ca_certs.yaml
deleted file mode 100644
index 2cd91551..00000000
--- a/tests/cloud_tests/testcases/modules/ca_certs.yaml
+++ /dev/null
@@ -1,56 +0,0 @@
-#
-# Remove existing ca_certs and install custom ca-cert
-#
-cloud_config: |
- #cloud-config
- ca-certs:
- remove-defaults: true
- trusted:
- - |
- -----BEGIN CERTIFICATE-----
- MIIGJzCCBA+gAwIBAgIBATANBgkqhkiG9w0BAQUFADCBsjELMAkGA1UEBhMCRlIx
- DzANBgNVBAgMBkFsc2FjZTETMBEGA1UEBwwKU3RyYXNib3VyZzEYMBYGA1UECgwP
- d3d3LmZyZWVsYW4ub3JnMRAwDgYDVQQLDAdmcmVlbGFuMS0wKwYDVQQDDCRGcmVl
- bGFuIFNhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxIjAgBgkqhkiG9w0BCQEW
- E2NvbnRhY3RAZnJlZWxhbi5vcmcwHhcNMTIwNDI3MTAzMTE4WhcNMjIwNDI1MTAz
- MTE4WjB+MQswCQYDVQQGEwJGUjEPMA0GA1UECAwGQWxzYWNlMRgwFgYDVQQKDA93
- d3cuZnJlZWxhbi5vcmcxEDAOBgNVBAsMB2ZyZWVsYW4xDjAMBgNVBAMMBWFsaWNl
- MSIwIAYJKoZIhvcNAQkBFhNjb250YWN0QGZyZWVsYW4ub3JnMIICIjANBgkqhkiG
- 9w0BAQEFAAOCAg8AMIICCgKCAgEA3W29+ID6194bH6ejLrIC4hb2Ugo8v6ZC+Mrc
- k2dNYMNPjcOKABvxxEtBamnSaeU/IY7FC/giN622LEtV/3oDcrua0+yWuVafyxmZ
- yTKUb4/GUgafRQPf/eiX9urWurtIK7XgNGFNUjYPq4dSJQPPhwCHE/LKAykWnZBX
- RrX0Dq4XyApNku0IpjIjEXH+8ixE12wH8wt7DEvdO7T3N3CfUbaITl1qBX+Nm2Z6
- q4Ag/u5rl8NJfXg71ZmXA3XOj7zFvpyapRIZcPmkvZYn7SMCp8dXyXHPdpSiIWL2
- uB3KiO4JrUYvt2GzLBUThp+lNSZaZ/Q3yOaAAUkOx+1h08285Pi+P8lO+H2Xic4S
- vMq1xtLg2bNoPC5KnbRfuFPuUD2/3dSiiragJ6uYDLOyWJDivKGt/72OVTEPAL9o
- 6T2pGZrwbQuiFGrGTMZOvWMSpQtNl+tCCXlT4mWqJDRwuMGrI4DnnGzt3IKqNwS4
- Qyo9KqjMIPwnXZAmWPm3FOKe4sFwc5fpawKO01JZewDsYTDxVj+cwXwFxbE2yBiF
- z2FAHwfopwaH35p3C6lkcgP2k/zgAlnBluzACUI+MKJ/G0gv/uAhj1OHJQ3L6kn1
- SpvQ41/ueBjlunExqQSYD7GtZ1Kg8uOcq2r+WISE3Qc9MpQFFkUVllmgWGwYDuN3
- Zsez95kCAwEAAaN7MHkwCQYDVR0TBAIwADAsBglghkgBhvhCAQ0EHxYdT3BlblNT
- TCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwHQYDVR0OBBYEFFlfyRO6G8y5qEFKikl5
- ajb2fT7XMB8GA1UdIwQYMBaAFCNsLT0+KV14uGw+quK7Lh5sh/JTMA0GCSqGSIb3
- DQEBBQUAA4ICAQAT5wJFPqervbja5+90iKxi1d0QVtVGB+z6aoAMuWK+qgi0vgvr
- mu9ot2lvTSCSnRhjeiP0SIdqFMORmBtOCFk/kYDp9M/91b+vS+S9eAlxrNCB5VOf
- PqxEPp/wv1rBcE4GBO/c6HcFon3F+oBYCsUQbZDKSSZxhDm3mj7pb67FNbZbJIzJ
- 70HDsRe2O04oiTx+h6g6pW3cOQMgIAvFgKN5Ex727K4230B0NIdGkzuj4KSML0NM
- slSAcXZ41OoSKNjy44BVEZv0ZdxTDrRM4EwJtNyggFzmtTuV02nkUj1bYYYC5f0L
- ADr6s0XMyaNk8twlWYlYDZ5uKDpVRVBfiGcq0uJIzIvemhuTrofh8pBQQNkPRDFT
- Rq1iTo1Ihhl3/Fl1kXk1WR3jTjNb4jHX7lIoXwpwp767HAPKGhjQ9cFbnHMEtkro
- RlJYdtRq5mccDtwT0GFyoJLLBZdHHMHJz0F9H7FNk2tTQQMhK5MVYwg+LIaee586
- CQVqfbscp7evlgjLW98H+5zylRHAgoH2G79aHljNKMp9BOuq6SnEglEsiWGVtu2l
- hnx8SB3sVJZHeer8f/UQQwqbAO+Kdy70NmbSaqaVtp8jOxLiidWkwSyRTsuU6D8i
- DiH5uEqBXExjrj0FslxcVKdVj5glVcSmkLwZKbEU1OKwleT/iXFhvooWhQ==
- -----END CERTIFICATE-----
-collect_scripts:
- cert_links: |
- #!/bin/bash
- # links printed <filename> -> <link target>
- # non-links printed <filename>
- for file in `ls /etc/ssl/certs`; do
- [ -h /etc/ssl/certs/$file ] && echo -n $file ' -> ' && readlink /etc/ssl/certs/$file || echo $file;
- done
- cert: |
- #!/bin/bash
- md5sum /etc/ssl/certs/ca-certificates.crt
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/debug_disable.py b/tests/cloud_tests/testcases/modules/debug_disable.py
deleted file mode 100644
index e40e4b89..00000000
--- a/tests/cloud_tests/testcases/modules/debug_disable.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestDebugDisable(base.CloudTestCase):
- """Disable debug messages."""
-
- def test_debug_disable(self):
- """Test verbose output missing from logs."""
- out = self.get_data_file('cloud-init.log')
- self.assertNotRegex(
- out, r'Skipping module named [a-z].* verbose printing disabled')
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/debug_disable.yaml b/tests/cloud_tests/testcases/modules/debug_disable.yaml
deleted file mode 100644
index 63218b18..00000000
--- a/tests/cloud_tests/testcases/modules/debug_disable.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-#
-# Do not run in debug mode
-#
-cloud_config: |
- #cloud-config
- debug:
- verbose: False
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/debug_enable.py b/tests/cloud_tests/testcases/modules/debug_enable.py
deleted file mode 100644
index 28d26062..00000000
--- a/tests/cloud_tests/testcases/modules/debug_enable.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestDebugEnable(base.CloudTestCase):
- """Test debug messages."""
-
- def test_debug_enable(self):
- """Test debug messages in cloud-init log."""
- out = self.get_data_file('cloud-init.log')
- self.assertIn('[DEBUG]', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/debug_enable.yaml b/tests/cloud_tests/testcases/modules/debug_enable.yaml
deleted file mode 100644
index d44147db..00000000
--- a/tests/cloud_tests/testcases/modules/debug_enable.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-#
-# Run in debug mode
-#
-cloud_config: |
- #cloud-config
- debug:
- verbose: True
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/final_message.py b/tests/cloud_tests/testcases/modules/final_message.py
deleted file mode 100644
index b7b5d5e0..00000000
--- a/tests/cloud_tests/testcases/modules/final_message.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestFinalMessage(base.CloudTestCase):
- """Test cloud init module `cc_final_message`."""
-
- subs_char = '$'
-
- def get_final_message_config(self):
- """Get config for final message."""
- self.assertIn('final_message', self.cloud_config)
- return self.cloud_config['final_message']
-
- def get_final_message(self):
- """Get final message from log."""
- out = self.get_data_file('cloud-init-output.log')
- lines = len(self.get_final_message_config().splitlines())
- return '\n'.join(out.splitlines()[-1 * lines:])
-
- def test_final_message_string(self):
- """Ensure final handles regular strings."""
- for actual, config in zip(
- self.get_final_message().splitlines(),
- self.get_final_message_config().splitlines()):
- if self.subs_char not in config:
- self.assertEqual(actual, config)
-
- def test_final_message_subs(self):
- """Test variable substitution in final message."""
- # TODO: add verification of other substitutions
- patterns = {'$datasource': self.get_datasource()}
- for key, expected in patterns.items():
- index = self.get_final_message_config().splitlines().index(key)
- actual = self.get_final_message().splitlines()[index]
- self.assertEqual(actual, expected)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/final_message.yaml b/tests/cloud_tests/testcases/modules/final_message.yaml
deleted file mode 100644
index c9ed6118..00000000
--- a/tests/cloud_tests/testcases/modules/final_message.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# Print a final message with various predefined variables
-#
-cloud_config: |
- #cloud-config
- final_message: |
- This is my final message!
- $version
- $timestamp
- $datasource
- $uptime
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/keys_to_console.py b/tests/cloud_tests/testcases/modules/keys_to_console.py
deleted file mode 100644
index 07f38112..00000000
--- a/tests/cloud_tests/testcases/modules/keys_to_console.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestKeysToConsole(base.CloudTestCase):
- """Test proper keys are included and excluded to console."""
-
- def test_excluded_keys(self):
- """Test excluded keys missing."""
- out = self.get_data_file('syslog')
- self.assertNotIn('(DSA)', out)
- self.assertNotIn('(ECDSA)', out)
-
- def test_expected_keys(self):
- """Test expected keys exist."""
- out = self.get_data_file('syslog')
- self.assertIn('(ED25519)', out)
- self.assertIn('(RSA)', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/keys_to_console.yaml b/tests/cloud_tests/testcases/modules/keys_to_console.yaml
deleted file mode 100644
index 5d86e739..00000000
--- a/tests/cloud_tests/testcases/modules/keys_to_console.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-#
-# Hide printing of ssh key and fingerprints for specific keys
-#
-required_features:
- - syslog
-cloud_config: |
- #cloud-config
- ssh_fp_console_blacklist: [ssh-dss, ssh-dsa, ecdsa-sha2-nistp256]
- ssh_key_console_blacklist: [ssh-dss, ssh-dsa, ecdsa-sha2-nistp256]
-collect_scripts:
- syslog: |
- #!/bin/bash
- cat /var/log/syslog
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/landscape.yaml b/tests/cloud_tests/testcases/modules/landscape.yaml
deleted file mode 100644
index ed2c37c4..00000000
--- a/tests/cloud_tests/testcases/modules/landscape.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-# Setup landscape client settings
-#
-# 2016-11-17: Disabled due to this not working
-#
-enabled: false
-required_features:
- - landscape
-cloud_config: |
- #cloud-config
- landscape:
- client:
- log_level: "info"
- url: "https://landscape.canonical.com/message-system"
- ping_url: "http://landscape.canonical.com/ping"
- data_path: "/var/lib/landscape/client"
- http_proxy: "http://my.proxy.com/foobar"
- https_proxy: "https://my.proxy.com/foobar"
- tags: "server,cloud"
- computer_title: "footitle"
- registration_key: "fookey"
- account_name: "fooaccount"
-collect_scripts:
- client.conf: |
- #!/bin/bash
- cat /etc/landscape/client.conf
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/locale.py b/tests/cloud_tests/testcases/modules/locale.py
deleted file mode 100644
index cb9e1dce..00000000
--- a/tests/cloud_tests/testcases/modules/locale.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-from cloudinit import util
-
-
-class TestLocale(base.CloudTestCase):
- """Test locale is set properly."""
-
- def test_locale(self):
- """Test locale is set properly."""
- data = util.load_shell_content(self.get_data_file('locale_default'))
- self.assertIn("LANG", data)
- self.assertEqual('en_GB.UTF-8', data['LANG'])
-
- def test_locale_a(self):
- """Test locale -a has both options."""
- out = self.get_data_file('locale_a')
- self.assertIn('en_GB.utf8', out)
- self.assertIn('en_US.utf8', out)
-
- def test_locale_gen(self):
- """Test local.gen file has all entries."""
- out = self.get_data_file('locale_gen')
- self.assertIn('en_GB.UTF-8', out)
- self.assertIn('en_US.UTF-8', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/locale.yaml b/tests/cloud_tests/testcases/modules/locale.yaml
deleted file mode 100644
index e01518a1..00000000
--- a/tests/cloud_tests/testcases/modules/locale.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-# Set locale to non-default option and verify
-#
-required_features:
- - engb_locale
- - locale_gen
-cloud_config: |
- #cloud-config
- locale: en_GB.UTF-8
- locale_configfile: /etc/default/locale
-collect_scripts:
- locale_default: |
- #!/bin/bash
- cat /etc/default/locale
- locale_a: |
- #!/bin/bash
- locale -a
- locale_gen: |
- #!/bin/bash
- cat /etc/locale.gen | grep -v '^#' | uniq
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/lxd_bridge.py b/tests/cloud_tests/testcases/modules/lxd_bridge.py
deleted file mode 100644
index ea545e0a..00000000
--- a/tests/cloud_tests/testcases/modules/lxd_bridge.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestLxdBridge(base.CloudTestCase):
- """Test LXD module."""
-
- @classmethod
- def maybeSkipTest(cls):
- """Skip on cosmic for two reasons:
- a.) LP: #1795036 - 'lxd init' fails on cosmic kernel.
- b.) apt install lxd installs via snap which can be slow
- as that will download core snap and lxd."""
- os_name = cls.data.get('os_name', 'UNKNOWN')
- if os_name == "cosmic":
- raise base.SkipTest('Skipping test on cosmic (LP: #1795036).')
-
- def test_lxd(self):
- """Test lxd installed."""
- out = self.get_data_file('lxd')
- self.assertIn('/lxd', out)
-
- def test_lxc(self):
- """Test lxc installed."""
- out = self.get_data_file('lxc')
- self.assertIn('/lxc', out)
-
- def test_bridge(self):
- """Test bridge config."""
- out = self.get_data_file('lxc-bridge')
- self.assertIn('lxdbr0', out)
- self.assertIn('10.100.100.1/24', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/lxd_bridge.yaml b/tests/cloud_tests/testcases/modules/lxd_bridge.yaml
deleted file mode 100644
index e6b7e76a..00000000
--- a/tests/cloud_tests/testcases/modules/lxd_bridge.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
-#
-# LXD configured with directory backend and IPv4 bridge
-#
-required_features:
- - lxd
-cloud_config: |
- #cloud-config
- lxd:
- init:
- storage_backend: dir
- bridge:
- mode: new
- name: lxdbr0
- ipv4_address: 10.100.100.1
- ipv4_netmask: 24
- ipv4_dhcp_first: 10.100.100.100
- ipv4_dhcp_last: 10.100.100.200
- ipv4_nat: true
- domain: lxd
-collect_scripts:
- lxc: |
- #!/bin/bash
- which lxc
- lxd: |
- #!/bin/bash
- which lxd
- lxc-bridge: |
- #!/bin/bash
- ip addr show lxdbr0
- cat /etc/default/lxd-bridge 2>/dev/null | grep -v ^# | sort -u
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/lxd_dir.py b/tests/cloud_tests/testcases/modules/lxd_dir.py
deleted file mode 100644
index 797bafed..00000000
--- a/tests/cloud_tests/testcases/modules/lxd_dir.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestLxdDir(base.CloudTestCase):
- """Test LXD module."""
-
- @classmethod
- def maybeSkipTest(cls):
- """Skip on cosmic for two reasons:
- a.) LP: #1795036 - 'lxd init' fails on cosmic kernel.
- b.) apt install lxd installs via snap which can be slow
- as that will download core snap and lxd."""
- os_name = cls.data.get('os_name', 'UNKNOWN')
- if os_name == "cosmic":
- raise base.SkipTest('Skipping test on cosmic (LP: #1795036).')
-
- def test_lxd(self):
- """Test lxd installed."""
- out = self.get_data_file('lxd')
- self.assertIn('/lxd', out)
-
- def test_lxc(self):
- """Test lxc installed."""
- out = self.get_data_file('lxc')
- self.assertIn('/lxc', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/lxd_dir.yaml b/tests/cloud_tests/testcases/modules/lxd_dir.yaml
deleted file mode 100644
index f93a3fa7..00000000
--- a/tests/cloud_tests/testcases/modules/lxd_dir.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-#
-# LXD configured with directory backend
-#
-required_features:
- - lxd
-cloud_config: |
- #cloud-config
- lxd:
- init:
- storage_backend: dir
-collect_scripts:
- lxc: |
- #!/bin/bash
- which lxc
- lxd: |
- #!/bin/bash
- which lxd
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp.py b/tests/cloud_tests/testcases/modules/ntp.py
deleted file mode 100644
index c63cc15e..00000000
--- a/tests/cloud_tests/testcases/modules/ntp.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestNtp(base.CloudTestCase):
- """Test ntp module"""
-
- def test_ntp_installed(self):
- """Test ntp installed"""
- self.assertPackageInstalled('ntp')
-
- def test_ntp_dist_entries(self):
- """Test dist config file is empty"""
- out = self.get_data_file('ntp_conf_dist_empty')
- self.assertEqual(0, int(out))
-
- def test_ntp_entries(self):
- """Test config entries"""
- out = self.get_data_file('ntp_conf_pool_list')
- self.assertIn('pool.ntp.org iburst', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp.yaml b/tests/cloud_tests/testcases/modules/ntp.yaml
deleted file mode 100644
index 7ea0707d..00000000
--- a/tests/cloud_tests/testcases/modules/ntp.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-# Empty NTP config to set up using defaults
-#
-cloud_config: |
- #cloud-config
- ntp:
- ntp_client: ntp
- pools: []
- servers: []
-collect_scripts:
- ntp_installed: |
- #!/bin/bash
- ntpd --version > /dev/null 2>&1
- echo $?
- ntp_conf_dist_empty: |
- #!/bin/bash
- ls /etc/ntp.conf.dist | wc -l
- ntp_conf_pool_list: |
- #!/bin/bash
- grep 'pool.ntp.org' /etc/ntp.conf | grep -v ^#
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp_chrony.py b/tests/cloud_tests/testcases/modules/ntp_chrony.py
deleted file mode 100644
index 7d341773..00000000
--- a/tests/cloud_tests/testcases/modules/ntp_chrony.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-import unittest
-
-from tests.cloud_tests.testcases import base
-
-
-class TestNtpChrony(base.CloudTestCase):
- """Test ntp module with chrony client"""
-
- def setUp(self):
- """Skip this suite of tests on lxd and artful or older."""
- if self.platform == 'lxd':
- if self.is_distro('ubuntu') and self.os_version_cmp('artful') <= 0:
- raise unittest.SkipTest(
- 'No support for chrony on containers <= artful.'
- ' LP: #1589780')
- return super(TestNtpChrony, self).setUp()
-
- def test_chrony_entries(self):
- """Test chrony config entries"""
- out = self.get_data_file('chrony_conf')
- self.assertIn('.pool.ntp.org', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp_chrony.yaml b/tests/cloud_tests/testcases/modules/ntp_chrony.yaml
deleted file mode 100644
index 120735e2..00000000
--- a/tests/cloud_tests/testcases/modules/ntp_chrony.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-#
-# ntp enabled, chrony selected, check conf file
-# as chrony won't start in a container
-#
-cloud_config: |
- #cloud-config
- ntp:
- enabled: true
- ntp_client: chrony
-collect_scripts:
- chrony_conf: |
- #!/bin/sh
- set -- /etc/chrony.conf /etc/chrony/chrony.conf
- for p in "$@"; do
- [ -e "$p" ] && { cat "$p"; exit; }
- done
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp_pools.py b/tests/cloud_tests/testcases/modules/ntp_pools.py
deleted file mode 100644
index 152fd3f1..00000000
--- a/tests/cloud_tests/testcases/modules/ntp_pools.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestNtpPools(base.CloudTestCase):
- """Test ntp module."""
-
- def test_ntp_installed(self):
- """Test ntp installed"""
- out = self.get_data_file('ntp_installed_pools')
- self.assertEqual(0, int(out))
-
- def test_ntp_dist_entries(self):
- """Test dist config file is empty"""
- out = self.get_data_file('ntp_conf_dist_pools')
- self.assertEqual(0, int(out))
-
- def test_ntp_entries(self):
- """Test config entries"""
- out = self.get_data_file('ntp_conf_pools')
- pools = self.cloud_config.get('ntp').get('pools')
- for pool in pools:
- self.assertIn('pool %s iburst' % pool, out)
-
- def test_ntpq_servers(self):
- """Test ntpq output has configured servers"""
- out = self.get_data_file('ntpq_servers')
- pools = self.cloud_config.get('ntp').get('pools')
- for pool in pools:
- self.assertIn(pool, out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp_pools.yaml b/tests/cloud_tests/testcases/modules/ntp_pools.yaml
deleted file mode 100644
index 60fa0fd1..00000000
--- a/tests/cloud_tests/testcases/modules/ntp_pools.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
-#
-# NTP config using specific pools
-#
-# NOTE: lsb_release is listed here because, with a recent cloud-init deb that
-# resolves LP: #1628337, cloud-init will attempt to configure archives.
-# This fails without lsb_release, as UNAVAILABLE is used for $RELEASE.
-required_features:
- - lsb_release
-cloud_config: |
- #cloud-config
- ntp:
- ntp_client: ntp
- pools:
- - 0.cloud-init.mypool
- - 1.cloud-init.mypool
- - 172.16.15.14
-collect_scripts:
- ntp_installed_pools: |
- #!/bin/bash
- ntpd --version > /dev/null 2>&1
- echo $?
- ntp_conf_dist_pools: |
- #!/bin/bash
- ls /etc/ntp.conf.dist | wc -l
- ntp_conf_pools: |
- #!/bin/bash
- grep '^pool' /etc/ntp.conf
- ntpq_servers: |
- #!/bin/sh
- ntpq -p -w -n
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp_servers.py b/tests/cloud_tests/testcases/modules/ntp_servers.py
deleted file mode 100644
index 8d2a68b3..00000000
--- a/tests/cloud_tests/testcases/modules/ntp_servers.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script"""
-from tests.cloud_tests.testcases import base
-
-
-class TestNtpServers(base.CloudTestCase):
- """Test ntp module"""
-
- def test_ntp_installed(self):
- """Test ntp installed"""
- out = self.get_data_file('ntp_installed_servers')
- self.assertEqual(0, int(out))
-
- def test_ntp_dist_entries(self):
- """Test dist config file is empty"""
- out = self.get_data_file('ntp_conf_dist_servers')
- self.assertEqual(0, int(out))
-
- def test_ntp_entries(self):
- """Test config server entries"""
- out = self.get_data_file('ntp_conf_servers')
- servers = self.cloud_config.get('ntp').get('servers')
- for server in servers:
- self.assertIn('server %s iburst' % server, out)
-
- def test_ntpq_servers(self):
- """Test ntpq output has configured servers"""
- out = self.get_data_file('ntpq_servers')
- servers = self.cloud_config.get('ntp').get('servers')
- for server in servers:
- self.assertIn(server, out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp_servers.yaml b/tests/cloud_tests/testcases/modules/ntp_servers.yaml
deleted file mode 100644
index ee636679..00000000
--- a/tests/cloud_tests/testcases/modules/ntp_servers.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-# NTP config using specific servers
-#
-required_features:
- - lsb_release
-cloud_config: |
- #cloud-config
- ntp:
- ntp_client: ntp
- servers:
- - 172.16.15.14
- - 172.16.17.18
-collect_scripts:
- ntp_installed_servers: |
- #!/bin/sh
- ntpd --version > /dev/null 2>&1
- echo $?
- ntp_conf_dist_servers: |
- #!/bin/sh
- cat /etc/ntp.conf.dist | wc -l
- ntp_conf_servers: |
- #!/bin/sh
- grep '^server' /etc/ntp.conf
- ntpq_servers: |
- #!/bin/sh
- ntpq -p -w -n
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp_timesyncd.py b/tests/cloud_tests/testcases/modules/ntp_timesyncd.py
deleted file mode 100644
index eca750bc..00000000
--- a/tests/cloud_tests/testcases/modules/ntp_timesyncd.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestNtpTimesyncd(base.CloudTestCase):
- """Test ntp module with systemd-timesyncd client"""
-
- def test_timesyncd_entries(self):
- """Test timesyncd config entries"""
- out = self.get_data_file('timesyncd_conf')
- self.assertIn('.pool.ntp.org', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp_timesyncd.yaml b/tests/cloud_tests/testcases/modules/ntp_timesyncd.yaml
deleted file mode 100644
index ee47a741..00000000
--- a/tests/cloud_tests/testcases/modules/ntp_timesyncd.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-#
-# ntp enabled, systemd-timesyncd selected, check conf file
-# as systemd-timesyncd won't start in a container
-#
-cloud_config: |
- #cloud-config
- ntp:
- enabled: true
- ntp_client: systemd-timesyncd
-collect_scripts:
- timesyncd_conf: |
- #!/bin/sh
- cat /etc/systemd/timesyncd.conf.d/cloud-init.conf
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py b/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py
deleted file mode 100644
index fecad768..00000000
--- a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestPackageInstallUpdateUpgrade(base.CloudTestCase):
- """Test package install update upgrade module."""
-
- def test_installed_sl(self):
- """Test sl got installed."""
- self.assertPackageInstalled('sl')
-
- def test_installed_tree(self):
- """Test tree got installed."""
- self.assertPackageInstalled('tree')
-
- def test_apt_history(self):
- """Test apt history for update command."""
- out = self.get_data_file('apt_history_cmdline')
- self.assertIn(
- 'Commandline: /usr/bin/apt-get --option=Dpkg::Options'
- '::=--force-confold --option=Dpkg::options::=--force-unsafe-io '
- '--assume-yes --quiet install sl tree', out)
-
- def test_cloud_init_output(self):
- """Test cloud-init-output for install & upgrade stuff."""
- out = self.get_data_file('cloud-init-output.log')
- self.assertIn('Setting up tree (', out)
- self.assertIn('Setting up sl (', out)
- self.assertIn('Reading package lists...', out)
- self.assertIn('Building dependency tree...', out)
- self.assertIn('Reading state information...', out)
- self.assertIn('Calculating upgrade...', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml b/tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml
deleted file mode 100644
index dd79e438..00000000
--- a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-#
-# Update/upgrade via apt and then install a pair of packages
-#
-# NOTE: this should not require the apt feature; use 'which' rather than 'dpkg -l'
-# NOTE: the testcase for this looks for the command in history.log as
-#       /usr/bin/apt-get..., which is not how it always appears. It should
-#       instead look for just apt-get...
-# NOTE: this testcase should not require 'apt_up_out', and should look for a
-# call to 'apt-get upgrade' or 'apt-get dist-upgrade' in cloud-init.log
-# rather than 'Calculating upgrade...' in output
-required_features:
- - apt
- - apt_hist_fmt
- - apt_up_out
-cloud_config: |
- #cloud-config
- packages:
- - sl
- - tree
- package_update: true
- package_upgrade: true
-collect_scripts:
- apt_history_cmdline: |
- #!/bin/bash
- grep ^Commandline: /var/log/apt/history.log
- dpkg_show: |
- #!/bin/bash
- dpkg-query --show
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/runcmd.py b/tests/cloud_tests/testcases/modules/runcmd.py
deleted file mode 100644
index 9fce3062..00000000
--- a/tests/cloud_tests/testcases/modules/runcmd.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestRunCmd(base.CloudTestCase):
- """Test runcmd module."""
-
- def test_run_cmd(self):
- """Test run command worked."""
- out = self.get_data_file('run_cmd')
- self.assertIn('cloud-init run cmd test', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/runcmd.yaml b/tests/cloud_tests/testcases/modules/runcmd.yaml
deleted file mode 100644
index 8309a883..00000000
--- a/tests/cloud_tests/testcases/modules/runcmd.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# Run a simple command
-#
-cloud_config: |
- #cloud-config
- runcmd:
- - echo cloud-init run cmd test > /var/tmp/run_cmd
-collect_scripts:
- run_cmd: |
- #!/bin/bash
- cat /var/tmp/run_cmd
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/seed_random_command.yaml b/tests/cloud_tests/testcases/modules/seed_random_command.yaml
deleted file mode 100644
index 6a9157eb..00000000
--- a/tests/cloud_tests/testcases/modules/seed_random_command.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-#
-# Use uuid to create a random string
-#
-# 2016-11-15 Disabled as this is not working currently
-#
-enabled: False
-cloud_config: |
- #cloud-config
- random_seed:
- command: ["cat", "/proc/sys/kernel/random/uuid"]
- command_required: true
- file: /root/seed
-collect_scripts:
- seed_data: |
- #!/bin/bash
- cat /root/seed
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/seed_random_data.py b/tests/cloud_tests/testcases/modules/seed_random_data.py
deleted file mode 100644
index db433d26..00000000
--- a/tests/cloud_tests/testcases/modules/seed_random_data.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestSeedRandom(base.CloudTestCase):
- """Test seed random module."""
-
- def test_random_seed_data(self):
- """Test random data passed in exists."""
- out = self.get_data_file('seed_data')
- self.assertIn('MYUb34023nD:LFDK10913jk;dfnk:Df', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/seed_random_data.yaml b/tests/cloud_tests/testcases/modules/seed_random_data.yaml
deleted file mode 100644
index a9b2c885..00000000
--- a/tests/cloud_tests/testcases/modules/seed_random_data.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-#
-# Push in random raw string to set as seed
-#
-cloud_config: |
- #cloud-config
- random_seed:
- data: 'MYUb34023nD:LFDK10913jk;dfnk:Df'
- encoding: raw
- file: /root/seed
-collect_scripts:
- seed_data: |
- #!/bin/bash
- cat /root/seed
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_hostname.py b/tests/cloud_tests/testcases/modules/set_hostname.py
deleted file mode 100644
index 1dbe64c2..00000000
--- a/tests/cloud_tests/testcases/modules/set_hostname.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestHostname(base.CloudTestCase):
- """Test hostname module."""
-
- ex_hostname = "cloudinit2"
-
- def test_hostname(self):
- """Test hostname command shows correct output."""
- out = self.get_data_file('hostname')
- self.assertIn(self.ex_hostname, out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_hostname.yaml b/tests/cloud_tests/testcases/modules/set_hostname.yaml
deleted file mode 100644
index 071fb220..00000000
--- a/tests/cloud_tests/testcases/modules/set_hostname.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# Set the hostname and update /etc/hosts
-#
-required_features:
- - hostname
-cloud_config: |
- #cloud-config
- hostname: cloudinit2
-
-collect_scripts:
- hosts: |
- #!/bin/bash
- grep ^127 /etc/hosts
- hostname: |
- #!/bin/bash
- hostname
- fqdn: |
- #!/bin/bash
- hostname --fqdn
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_hostname_fqdn.py b/tests/cloud_tests/testcases/modules/set_hostname_fqdn.py
deleted file mode 100644
index a405b30b..00000000
--- a/tests/cloud_tests/testcases/modules/set_hostname_fqdn.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests import CI_DOMAIN
-from tests.cloud_tests.testcases import base
-
-
-class TestHostnameFqdn(base.CloudTestCase):
- """Test Hostname module."""
-
- ex_hostname = "cloudinit1"
- ex_fqdn = "cloudinit2." + CI_DOMAIN
-
- def test_hostname(self):
- """Test hostname output."""
- out = self.get_data_file('hostname')
- self.assertIn(self.ex_hostname, out)
-
- def test_hostname_fqdn(self):
- """Test hostname fqdn output."""
- out = self.get_data_file('fqdn')
- self.assertIn(self.ex_fqdn, out)
-
- def test_hosts(self):
- """Test /etc/hosts file."""
- out = self.get_data_file('hosts')
- self.assertIn('127.0.1.1 %s %s' % (self.ex_fqdn, self.ex_hostname),
- out)
- self.assertIn('127.0.0.1 localhost', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_hostname_fqdn.yaml b/tests/cloud_tests/testcases/modules/set_hostname_fqdn.yaml
deleted file mode 100644
index a85ee79e..00000000
--- a/tests/cloud_tests/testcases/modules/set_hostname_fqdn.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-# Set the hostname and update /etc/hosts
-#
-required_features:
- - hostname
-cloud_config: |
- #cloud-config
- manage_etc_hosts: true
- hostname: cloudinit1
- # this needs changing if CI_DOMAIN were updated.
- fqdn: cloudinit2.i9n.cloud-init.io
-collect_scripts:
- hosts: |
- #!/bin/bash
- grep ^127 /etc/hosts
- hostname: |
- #!/bin/bash
- hostname
- fqdn: |
- #!/bin/bash
- hostname --fqdn
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_password.py b/tests/cloud_tests/testcases/modules/set_password.py
deleted file mode 100644
index a29b2261..00000000
--- a/tests/cloud_tests/testcases/modules/set_password.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestPassword(base.CloudTestCase):
- """Test password module."""
-
- # TODO add test to make sure password is actually "password"
-
- def test_shadow(self):
- """Test ubuntu user in shadow."""
- out = self.get_data_file('shadow')
- self.assertIn('ubuntu:', out)
-
- def test_sshd_config(self):
- """Test sshd config allows passwords."""
- out = self.get_data_file('sshd_config')
- self.assertIn('PasswordAuthentication yes', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_password.yaml b/tests/cloud_tests/testcases/modules/set_password.yaml
deleted file mode 100644
index 04d7c58a..00000000
--- a/tests/cloud_tests/testcases/modules/set_password.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-#
-# Set password of default user
-#
-required_features:
- - ubuntu_user
-cloud_config: |
- #cloud-config
- password: password
- chpasswd: { expire: False }
- ssh_pwauth: True
-collect_scripts:
- shadow: |
- #!/bin/bash
- cat /etc/shadow
- sshd_config: |
- #!/bin/bash
- grep '^PasswordAuth' /etc/ssh/sshd_config
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_password_expire.py b/tests/cloud_tests/testcases/modules/set_password_expire.py
deleted file mode 100644
index 967aca7b..00000000
--- a/tests/cloud_tests/testcases/modules/set_password_expire.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestPasswordExpire(base.CloudTestCase):
- """Test password module."""
-
- def test_shadow(self):
- """Test user frozen in shadow."""
- out = self.get_data_file('shadow')
- self.assertIn('harry:!:', out)
- self.assertIn('dick:!:', out)
- self.assertIn('tom:!:', out)
- self.assertIn('harry:!:', out)
-
- def test_sshd_config(self):
- """Test sshd config allows passwords."""
- out = self.get_data_file('sshd_config')
- self.assertIn('PasswordAuthentication yes', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_password_expire.yaml b/tests/cloud_tests/testcases/modules/set_password_expire.yaml
deleted file mode 100644
index ba6344b9..00000000
--- a/tests/cloud_tests/testcases/modules/set_password_expire.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
-#
-# Expire password for all users
-#
-required_features:
- - sshd
-cloud_config: |
- #cloud-config
- chpasswd: { expire: True }
- ssh_pwauth: yes
- users:
- - default
- - name: tom
- password: $1$xyz$sPMsLNmf66Ohl.ol6JvzE.
- lock_passwd: false
- - name: dick
- password: $1$xyz$sPMsLNmf66Ohl.ol6JvzE.
- lock_passwd: false
- - name: harry
- password: $1$xyz$sPMsLNmf66Ohl.ol6JvzE.
- lock_passwd: false
- - name: jane
- password: $1$xyz$sPMsLNmf66Ohl.ol6JvzE.
- lock_passwd: false
-collect_scripts:
- shadow: |
- #!/bin/bash
- cat /etc/shadow
- sshd_config: |
- #!/bin/bash
- grep '^PasswordAuth' /etc/ssh/sshd_config
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_password_list.py b/tests/cloud_tests/testcases/modules/set_password_list.py
deleted file mode 100644
index 375cd27d..00000000
--- a/tests/cloud_tests/testcases/modules/set_password_list.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestPasswordList(base.PasswordListTest, base.CloudTestCase):
- """Test password setting via list in chpasswd/list."""
-
- __test__ = True
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_password_list.yaml b/tests/cloud_tests/testcases/modules/set_password_list.yaml
deleted file mode 100644
index fd3e1e44..00000000
--- a/tests/cloud_tests/testcases/modules/set_password_list.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
-#
-# Set password of list of users
-#
-cloud_config: |
- #cloud-config
- ssh_pwauth: yes
- users:
- - default
- - name: tom
- # md5 gotomgo
- passwd: "$1$S7$tT1BEDIYrczeryDQJfdPe0"
- lock_passwd: false
- - name: dick
- # md5 gocubsgo
- passwd: "$1$ssisyfpf$YqvuJLfrrW6Cg/l53Pi1n1"
- lock_passwd: false
- - name: harry
- # sha512 goharrygo
- passwd: "$6$LF$9Z2p6rWK6TNC1DC6393ec0As.18KRAvKDbfsGJEdWN3sRQRwpdfoh37EQ3yUh69tP4GSrGW5XKHxMLiKowJgm/"
- lock_passwd: false
- - name: jane
- # sha256 gojanego
- passwd: "$5$iW$XsxmWCdpwIW8Yhv.Jn/R3uk6A4UaicfW5Xp7C9p9pg."
- lock_passwd: false
- - name: "mikey"
- lock_passwd: false
- chpasswd:
- list:
- - tom:mypassword123!
- - dick:RANDOM
- - harry:RANDOM
- - mikey:$5$xZ$B2YGGEx2AOf4PeW48KC6.QyT1W2B4rZ9Qbltudtha89
-collect_scripts:
- shadow: |
- #!/bin/bash
- cat /etc/shadow
- sshd_config: |
- #!/bin/bash
- grep '^PasswordAuth' /etc/ssh/sshd_config
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_password_list_string.py b/tests/cloud_tests/testcases/modules/set_password_list_string.py
deleted file mode 100644
index 8c2634c5..00000000
--- a/tests/cloud_tests/testcases/modules/set_password_list_string.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestPasswordListString(base.PasswordListTest, base.CloudTestCase):
- """Test password setting via string in chpasswd/list."""
-
- __test__ = True
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/set_password_list_string.yaml b/tests/cloud_tests/testcases/modules/set_password_list_string.yaml
deleted file mode 100644
index e9fe54b0..00000000
--- a/tests/cloud_tests/testcases/modules/set_password_list_string.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
-#
-# Set password of list of users as a string
-#
-cloud_config: |
- #cloud-config
- ssh_pwauth: yes
- users:
- - default
- - name: tom
- # md5 gotomgo
- passwd: "$1$S7$tT1BEDIYrczeryDQJfdPe0"
- lock_passwd: false
- - name: dick
- # md5 gocubsgo
- passwd: "$1$ssisyfpf$YqvuJLfrrW6Cg/l53Pi1n1"
- lock_passwd: false
- - name: harry
- # sha512 goharrygo
- passwd: "$6$LF$9Z2p6rWK6TNC1DC6393ec0As.18KRAvKDbfsGJEdWN3sRQRwpdfoh37EQ3yUh69tP4GSrGW5XKHxMLiKowJgm/"
- lock_passwd: false
- - name: jane
- # sha256 gojanego
- passwd: "$5$iW$XsxmWCdpwIW8Yhv.Jn/R3uk6A4UaicfW5Xp7C9p9pg."
- lock_passwd: false
- - name: "mikey"
- lock_passwd: false
- chpasswd:
- list: |
- tom:mypassword123!
- dick:RANDOM
- harry:RANDOM
- mikey:$5$xZ$B2YGGEx2AOf4PeW48KC6.QyT1W2B4rZ9Qbltudtha89
-collect_scripts:
- shadow: |
- #!/bin/bash
- cat /etc/shadow
- sshd_config: |
- #!/bin/bash
- grep '^PasswordAuth' /etc/ssh/sshd_config
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/snap.py b/tests/cloud_tests/testcases/modules/snap.py
deleted file mode 100644
index ff68abbe..00000000
--- a/tests/cloud_tests/testcases/modules/snap.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script"""
-from tests.cloud_tests.testcases import base
-
-
-class TestSnap(base.CloudTestCase):
- """Test snap module"""
-
- def test_snappy_version(self):
- """Expect hello-world and core snaps are installed."""
- out = self.get_data_file('snaplist')
- self.assertIn('core', out)
- self.assertIn('hello-world', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/snap.yaml b/tests/cloud_tests/testcases/modules/snap.yaml
deleted file mode 100644
index 322199c3..00000000
--- a/tests/cloud_tests/testcases/modules/snap.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# Install snappy
-#
-# Aug 23, 2018: Disabled due to requiring a proxy for testing
-# tests do not handle the proxy well at this time.
-enabled: False
-required_features:
- - snap
-cloud_config: |
- #cloud-config
- package_update: true
- snap:
- squashfuse_in_container: true
- commands:
- - snap install hello-world
-collect_scripts:
- snaplist: |
- #!/bin/bash
- snap list
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.py b/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.py
deleted file mode 100644
index 02935447..00000000
--- a/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestSshKeyFingerprintsDisable(base.CloudTestCase):
- """Test ssh key fingerprints module."""
-
- def test_cloud_init_log(self):
- """Verify disabled."""
- out = self.get_data_file('cloud-init.log')
- self.assertIn('Skipping module named ssh-authkey-fingerprints, '
- 'logging of SSH fingerprints disabled', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.yaml b/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.yaml
deleted file mode 100644
index d93893e2..00000000
--- a/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# Disable fingerprint printing
-#
-required_features:
- - syslog
-cloud_config: |
- #cloud-config
- no_ssh_fingerprints: true
-collect_scripts:
- syslog: |
- #!/bin/bash
- cat /var/log/syslog
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_enable.py b/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_enable.py
deleted file mode 100644
index 3510e75a..00000000
--- a/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_enable.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestSshKeyFingerprintsEnable(base.CloudTestCase):
- """Test ssh key fingerprints module."""
-
- def test_syslog(self):
- """Verify output of syslog."""
- out = self.get_data_file('syslog')
- self.assertRegex(out, r'256 SHA256:.*(ECDSA)')
- self.assertRegex(out, r'256 SHA256:.*(ED25519)')
- self.assertNotRegex(out, r'1024 SHA256:.*(DSA)')
- self.assertNotRegex(out, r'2048 SHA256:.*(RSA)')
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_enable.yaml b/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_enable.yaml
deleted file mode 100644
index 9f5dc34a..00000000
--- a/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_enable.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# Print auth keys with different hash than md5
-#
-# NOTE: the testcase checks for '256 SHA256:.*(ECDSA)' in the output; on trusty
-#       this fails, as the output line reads '256:.*(ECDSA)'
-required_features:
- - syslog
- - ssh_key_fmt
-cloud_config: |
- #cloud-config
- ssh_genkeytypes:
- - ecdsa
- - ed25519
- ssh_authorized_keys:
- - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDXW9Gg5H7ehjdSc6qDzwNtgCy94XYHhEYlXZMO2+FJrH3wfHGiMfCwOHxcOMt2QiXItULthdeQWS9QjBSSjVRXf6731igFrqPFyS9qBlOQ5D29C4HBXFnQggGVpBNJ82IRJv7szbbe/vpgLBP4kttUza9Dr4e1YM1ln4PRnjfXea6T0m+m1ixNb5432pTXlqYOnNOxSIm1gHgMLxPuDrJvQERDKrSiKSjIdyC9Jd8t2e1tkNLY0stmckVRbhShmcJvlyofHWbc2Ca1mmtP7MlS1VQnfLkvU1IrFwkmaQmaggX6WR6coRJ6XFXdWcq/AI2K6GjSnl1dnnCxE8VCEXBlXgFzad+PMSG4yiL5j8Oo1ZVpkTdgBnw4okGqTYCXyZg6X00As9IBNQfZMFlQXlIo4FiWgj3CO5QHQOyOX6FuEumaU13GnERrSSdp9tCs1Qm3/DG2RSCQBWTfcgMcStIvKqvJ3IjFn0vGLvI3Ampnq9q1SHwmmzAPSdzcMA76HyMUA5VWaBvWHlUxzIM6unxZASnwvuCzpywSEB5J2OF+p6H+cStJwQ32XwmOG8pLp1srlVWpqZI58Du/lzrkPqONphoZx0LDV86w7RUz1ksDzAdcm0tvmNRFMN1a0frDs506oA3aWK0oDk4Nmvk8sXGTYYw3iQSkOvDUUlIsqdaO+w==
-collect_scripts:
- syslog: |
- #!/bin/bash
- cat /var/log/syslog
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ssh_import_id.py b/tests/cloud_tests/testcases/modules/ssh_import_id.py
deleted file mode 100644
index ef156f47..00000000
--- a/tests/cloud_tests/testcases/modules/ssh_import_id.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestSshImportId(base.CloudTestCase):
- """Test ssh import id module."""
-
- def test_authorized_keys(self):
- """Test that ssh keys were imported."""
- out = self.get_data_file('auth_keys_ubuntu')
-
- self.assertIn('# ssh-import-id gh:powersj', out)
- self.assertIn('# ssh-import-id lp:smoser', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ssh_import_id.yaml b/tests/cloud_tests/testcases/modules/ssh_import_id.yaml
deleted file mode 100644
index b62d3f69..00000000
--- a/tests/cloud_tests/testcases/modules/ssh_import_id.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-#
-# Import a user's ssh key via gh or lp
-#
-required_features:
- - ubuntu_user
- - sudo
-cloud_config: |
- #cloud-config
- ssh_import_id:
- - gh:powersj
- - lp:smoser
-collect_scripts:
- auth_keys_ubuntu: |
- #!/bin/bash
- cat /home/ubuntu/.ssh/authorized_keys
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ssh_keys_generate.py b/tests/cloud_tests/testcases/modules/ssh_keys_generate.py
deleted file mode 100644
index b68f5565..00000000
--- a/tests/cloud_tests/testcases/modules/ssh_keys_generate.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestSshKeysGenerate(base.CloudTestCase):
- """Test ssh keys module."""
-
- # TODO: Check cloud-init-output for the correct keys being generated
-
- def test_dsa_public(self):
- """Test dsa public key not generated."""
- out = self.get_data_file('dsa_public')
- self.assertEqual('', out)
-
- def test_dsa_private(self):
- """Test dsa private key not generated."""
- out = self.get_data_file('dsa_private')
- self.assertEqual('', out)
-
- def test_rsa_public(self):
- """Test rsa public key not generated."""
- out = self.get_data_file('rsa_public')
- self.assertEqual('', out)
-
- def test_rsa_private(self):
- """Test rsa public key not generated."""
- out = self.get_data_file('rsa_private')
- self.assertEqual('', out)
-
- def test_ecdsa_public(self):
- """Test ecdsa public key generated."""
- out = self.get_data_file('ecdsa_public')
- self.assertIsNotNone(out)
-
- def test_ecdsa_private(self):
- """Test ecdsa public key generated."""
- out = self.get_data_file('ecdsa_private')
- self.assertIsNotNone(out)
-
- def test_ed25519_public(self):
- """Test ed25519 public key generated."""
- out = self.get_data_file('ed25519_public')
- self.assertIsNotNone(out)
-
- def test_ed25519_private(self):
- """Test ed25519 public key generated."""
- out = self.get_data_file('ed25519_private')
- self.assertIsNotNone(out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ssh_keys_generate.yaml b/tests/cloud_tests/testcases/modules/ssh_keys_generate.yaml
deleted file mode 100644
index 0a7adf62..00000000
--- a/tests/cloud_tests/testcases/modules/ssh_keys_generate.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
-#
-# SSH keys generated using cloud-init
-#
-required_features:
- - ubuntu_user
-cloud_config: |
- #cloud-config
- ssh_genkeytypes:
- - ecdsa
- - ed25519
- authkey_hash: sha512
-collect_scripts:
- dsa_public: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_dsa_key.pub
- dsa_private: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_dsa_key
- rsa_public: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_rsa_key.pub
- rsa_private: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_rsa_key
- ecdsa_public: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_ecdsa_key.pub
- ecdsa_private: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_ecdsa_key
- ed25519_public: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_ed25519_key.pub
- ed25519_private: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_ed25519_key
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ssh_keys_provided.py b/tests/cloud_tests/testcases/modules/ssh_keys_provided.py
deleted file mode 100644
index add3f469..00000000
--- a/tests/cloud_tests/testcases/modules/ssh_keys_provided.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestSshKeysProvided(base.CloudTestCase):
- """Test ssh keys module."""
-
- def test_dsa_public(self):
- """Test dsa public key passed in."""
- out = self.get_data_file('dsa_public')
- self.assertIn('AAAAB3NzaC1kc3MAAACBAPkWy1zbchVIN7qTgM0/yyY8q4RZS8c'
- 'NM4ZpeuE5UB/Nnr6OSU/nmbO8LuM', out)
-
- def test_dsa_private(self):
- """Test dsa private key passed in."""
- out = self.get_data_file('dsa_private')
- self.assertIn('MIIBuwIBAAKBgQD5Fstc23IVSDe6k4DNP8smPKuEWUvHDTOGaXr'
- 'hOVAfzZ6+jklP', out)
-
- def test_rsa_public(self):
- """Test rsa public key passed in."""
- out = self.get_data_file('rsa_public')
- self.assertIn('AAAAB3NzaC1yc2EAAAADAQABAAABAQC0/Ho+o3eJISydO2JvIgT'
- 'LnZOtrxPl+fSvJfKDjoOLY0HB2eOjy2s2/2N6d9X9SGZ4', out)
-
- def test_rsa_private(self):
- """Test rsa public key passed in."""
- out = self.get_data_file('rsa_private')
- self.assertIn('4DOkqNiUGl80Zp1RgZNohHUXlJMtAbrIlAVEk+mTmg7vjfyp2un'
- 'RQvLZpMRdywBm', out)
-
- def test_ecdsa_public(self):
- """Test ecdsa public key passed in."""
- out = self.get_data_file('ecdsa_public')
- self.assertIn('AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAAB'
- 'BBFsS5Tvky/IC/dXhE/afxxU', out)
-
- def test_ecdsa_private(self):
- """Test ecdsa public key passed in."""
- out = self.get_data_file('ecdsa_private')
- self.assertIn('AwEHoUQDQgAEWxLlO+TL8gL91eET9p/HFQbqR1A691AkJgZk3jY'
- '5mpZqxgX4vcgb', out)
-
- def test_ed25519_public(self):
- """Test ed25519 public key passed in."""
- out = self.get_data_file('ed25519_public')
- self.assertIn('AAAAC3NzaC1lZDI1NTE5AAAAINudAZSu4vjZpVWzId5pXmZg1M6'
- 'G15dqjQ2XkNVOEnb5', out)
-
- def test_ed25519_private(self):
- """Test ed25519 public key passed in."""
- out = self.get_data_file('ed25519_private')
- self.assertIn('XAAAAAtzc2gtZWQyNTUxOQAAACDbnQGUruL42aVVsyHeaV5mYNT'
- 'OhteXao0Nl5DVThJ2+Q', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ssh_keys_provided.yaml b/tests/cloud_tests/testcases/modules/ssh_keys_provided.yaml
deleted file mode 100644
index 41f63550..00000000
--- a/tests/cloud_tests/testcases/modules/ssh_keys_provided.yaml
+++ /dev/null
@@ -1,99 +0,0 @@
-#
-# SSH keys provided via cloud config
-#
-enabled: False
-required_features:
- - ubuntu_user
- - sudo
-cloud_config: |
- #cloud-config
- disable_root: false
- ssh_authorized_keys:
- - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDXW9Gg5H7ehjdSc6qDzwNtgCy94XYHhEYlXZMO2+FJrH3wfHGiMfCwOHxcOMt2QiXItULthdeQWS9QjBSSjVRXf6731igFrqPFyS9qBlOQ5D29C4HBXFnQggGVpBNJ82IRJv7szbbe/vpgLBP4kttUza9Dr4e1YM1ln4PRnjfXea6T0m+m1ixNb5432pTXlqYOnNOxSIm1gHgMLxPuDrJvQERDKrSiKSjIdyC9Jd8t2e1tkNLY0stmckVRbhShmcJvlyofHWbc2Ca1mmtP7MlS1VQnfLkvU1IrFwkmaQmaggX6WR6coRJ6XFXdWcq/AI2K6GjSnl1dnnCxE8VCEXBlXgFzad+PMSG4yiL5j8Oo1ZVpkTdgBnw4okGqTYCXyZg6X00As9IBNQfZMFlQXlIo4FiWgj3CO5QHQOyOX6FuEumaU13GnERrSSdp9tCs1Qm3/DG2RSCQBWTfcgMcStIvKqvJ3IjFn0vGLvI3Ampnq9q1SHwmmzAPSdzcMA76HyMUA5VWaBvWHlUxzIM6unxZASnwvuCzpywSEB5J2OF+p6H+cStJwQ32XwmOG8pLp1srlVWpqZI58Du/lzrkPqONphoZx0LDV86w7RUz1ksDzAdcm0tvmNRFMN1a0frDs506oA3aWK0oDk4Nmvk8sXGTYYw3iQSkOvDUUlIsqdaO+w==
- ssh_keys:
- rsa_private: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKCAQEAtPx6PqN3iSEsnTtibyIEy52Tra8T5fn0ryXyg46Di2NBwdnj
- o8trNv9jenfV/UhmePl58lXjT43wV8OCMl6KsYXyBdegM35NNtono4I4mLLKFMR9
- 9TOtDn6iYcaNenVhF3ZCj9Z2nNOlTrdc0uchHqKMrxLjCRCUrL91Uf+xioTF901Y
- RM+ZqC5lT92yAL76F4qPF+Lq1QtUfNfUIwwvOp5ccDZLPxij0YvyBzubYye9hJHu
- yjbJv78R4JHV+L2WhzSoX3W/6WrxVzeXqFGqH894ccOaC/7tnqSP6V8lIQ6fE2+c
- DurJcpM3CJRgkndGHjtU55Y71YkcdLksSMvezQIDAQABAoIBAQCrU4IJP8dNeaj5
- IpkY6NQvR/jfZqfogYi+MKb1IHin/4rlDfUvPcY9pt8ttLlObjYK+OcWn3Vx/sRw
- 4DOkqNiUGl80Zp1RgZNohHUXlJMtAbrIlAVEk+mTmg7vjfyp2unRQvLZpMRdywBm
- lq95OrCghnG03aUsFJUZPpi5ydnwbA12ma+KHkG0EzaVlhA7X9N6z0K6U+zue2gl
- goMLt/MH0rsYawkHrwiwXaIFQeyV4MJP0vmrZLbFk1bycu9X/xPtTYotWyWo4eKA
- cb05uu04qwexkKHDM0KXtT0JecbTo2rOefFo8Uuab6uJY+fEHNocZ+v1vLA4aOxJ
- ovp1JuXlAoGBAOWYNgKrlTfy5n0sKsNk+1RuL2jHJZJ3HMd0EIt7/fFQN3Fi08Hu
- jtntqD30Wj+DJK8b8Lrt66FruxyEJm5VhVmwkukrLR5ige2f6ftZnoFCmdyy+0zP
- dnPZSUe2H5ZPHa+qthJgHLn+al2P04tGh+1fGHC2PbP+e0Co+/ZRIOxrAoGBAMnN
- IEen9/FRsqvnDd36I8XnJGskVRTZNjylxBmbKcuMWm+gNhOI7gsCAcqzD4BYZjjW
- pLhrt/u9p+l4MOJy6OUUdM/okg12SnJEGryysOcVBcXyrvOfklWnANG4EAH5jt1N
- ftTb1XTxzvWVuR/WJK0B5MZNYM71cumBdUDtPi+nAoGAYmoIXMSnxb+8xNL10aOr
- h9ljQQp8NHgSQfyiSufvRk0YNuYh1vMnEIsqnsPrG2Zfhx/25GmvoxXGssaCorDN
- 5FAn6QK06F1ZTD5L0Y3sv4OI6G1gAuC66ZWuL6sFhyyKkQ4f1WiVZ7SCa3CHQSAO
- i9VDaKz1bf4bXvAQcNj9v9kCgYACSOZCqW4vN0OUmqsXhkt9ZB6Pb/veno70pNPR
- jmYsvcwQU3oJQpWfXkhy6RAV3epaXmPDCsUsfns2M3wqNC7a2R5xdCqjKGGzZX4A
- AO3rz9se4J6Gd5oKijeCKFlWDGNHsibrdgm2pz42nZlY+O21X74dWKbt8O16I1MW
- hxkbJQKBgAXfuen/srVkJgPuqywUYag90VWCpHsuxdn+fZJa50SyZADr+RbiDfH2
- vek8Uo8ap8AEsv4Rfs9opUcUZevLp3g2741eOaidHVLm0l4iLIVl03otGOqvSzs+
- A3tFPEOxauXpzCt8f8eXsz0WQXAgIKW2h8zu5QHjomioU3i27mtE
- -----END RSA PRIVATE KEY-----
- rsa_public: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC0/Ho+o3eJISydO2JvIgTLnZOtrxPl+fSvJfKDjoOLY0HB2eOjy2s2/2N6d9X9SGZ4+XnyVeNPjfBXw4IyXoqxhfIF16Azfk022iejgjiYssoUxH31M60OfqJhxo16dWEXdkKP1nac06VOt1zS5yEeooyvEuMJEJSsv3VR/7GKhMX3TVhEz5moLmVP3bIAvvoXio8X4urVC1R819QjDC86nlxwNks/GKPRi/IHO5tjJ72Eke7KNsm/vxHgkdX4vZaHNKhfdb/pavFXN5eoUaofz3hxw5oL/u2epI/pXyUhDp8Tb5wO6slykzcIlGCSd0YeO1TnljvViRx0uSxIy97N root@xenial-lxd
- dsa_private: |
- -----BEGIN DSA PRIVATE KEY-----
- MIIBuwIBAAKBgQD5Fstc23IVSDe6k4DNP8smPKuEWUvHDTOGaXrhOVAfzZ6+jklP
- 55mzvC7jO53PWWC31hq10xBoWdev0WtcNF9Tv+4bAa1263y51Rqo4GI7xx+xic1d
- mLqqfYijBT9k48J/1tV0cs1Wjs6FP/IJTD/kYVC930JjYQMi722lBnUxsQIVAL7i
- z3fTGKTvSzvW0wQlwnYpS2QFAoGANp+KdyS9V93HgxGQEN1rlj/TSv/a3EVdCKtE
- nQf55aPHxDAVDVw5JtRh4pZbbRV4oGRPc9KOdjo5BU28vSM3Lmhkb+UaaDXwHkgI
- nK193o74DKjADWZxuLyyiKHiMOhxozoxDfjWxs8nz6uqvSW0pr521EwIY6RajbED
- nZ2a3GkCgYEAyoUomNRB6bmpsIfzt8zdtqLP5umIj2uhr9MVPL8/QdbxmJ72Z7pf
- Q2z1B7QAdIBGOlqJXtlau7ABhWK29Efe+99ObyTSSdDc6RCDeAwUmBAiPRQhDH2E
- wExw3doDSCUb28L1B50wBzQ8mC3KXp6C7IkBXWspb16DLHUHFSI8bkICFA5kVUcW
- nCPOXEQsayANi8+Cb7BH
- -----END DSA PRIVATE KEY-----
- dsa_public: ssh-dss AAAAB3NzaC1kc3MAAACBAPkWy1zbchVIN7qTgM0/yyY8q4RZS8cNM4ZpeuE5UB/Nnr6OSU/nmbO8LuM7nc9ZYLfWGrXTEGhZ16/Ra1w0X1O/7hsBrXbrfLnVGqjgYjvHH7GJzV2Yuqp9iKMFP2Tjwn/W1XRyzVaOzoU/8glMP+RhUL3fQmNhAyLvbaUGdTGxAAAAFQC+4s930xik70s71tMEJcJ2KUtkBQAAAIA2n4p3JL1X3ceDEZAQ3WuWP9NK/9rcRV0Iq0SdB/nlo8fEMBUNXDkm1GHillttFXigZE9z0o52OjkFTby9IzcuaGRv5RpoNfAeSAicrX3ejvgMqMANZnG4vLKIoeIw6HGjOjEN+NbGzyfPq6q9JbSmvnbUTAhjpFqNsQOdnZrcaQAAAIEAyoUomNRB6bmpsIfzt8zdtqLP5umIj2uhr9MVPL8/QdbxmJ72Z7pfQ2z1B7QAdIBGOlqJXtlau7ABhWK29Efe+99ObyTSSdDc6RCDeAwUmBAiPRQhDH2EwExw3doDSCUb28L1B50wBzQ8mC3KXp6C7IkBXWspb16DLHUHFSI8bkI= root@xenial-lxd
- ed25519_private: |
- -----BEGIN OPENSSH PRIVATE KEY-----
- b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
- QyNTUxOQAAACDbnQGUruL42aVVsyHeaV5mYNTOhteXao0Nl5DVThJ2+QAAAJgwt+lcMLfp
- XAAAAAtzc2gtZWQyNTUxOQAAACDbnQGUruL42aVVsyHeaV5mYNTOhteXao0Nl5DVThJ2+Q
- AAAEDQlFZpz9q8+/YJHS9+jPAqy2ZT6cGEv8HTB6RZtTjd/dudAZSu4vjZpVWzId5pXmZg
- 1M6G15dqjQ2XkNVOEnb5AAAAD3Jvb3RAeGVuaWFsLWx4ZAECAwQFBg==
- -----END OPENSSH PRIVATE KEY-----
- ed25519_public: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINudAZSu4vjZpVWzId5pXmZg1M6G15dqjQ2XkNVOEnb5 root@xenial-lxd
- ecdsa_private: |
- -----BEGIN EC PRIVATE KEY-----
- MHcCAQEEIDuK+QFc1wmyJY8uDqQVa1qHte30Rk/fdLxGIBkwJAyOoAoGCCqGSM49
- AwEHoUQDQgAEWxLlO+TL8gL91eET9p/HFQbqR1A691AkJgZk3jY5mpZqxgX4vcgb
- 7f/CtXuM6s2svcDJqAeXr6Wk8OJJcMxylA==
- -----END EC PRIVATE KEY-----
- ecdsa_public: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFsS5Tvky/IC/dXhE/afxxUG6kdQOvdQJCYGZN42OZqWasYF+L3IG+3/wrV7jOrNrL3AyagHl6+lpPDiSXDMcpQ= root@xenial-lxd
-collect_scripts:
- dsa_public: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_dsa_key.pub
- dsa_private: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_dsa_key
- rsa_public: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_rsa_key.pub
- rsa_private: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_rsa_key
- ecdsa_public: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_ecdsa_key.pub
- ecdsa_private: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_ecdsa_key
- ed25519_public: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_ed25519_key.pub
- ed25519_private: |
- #!/bin/bash
- cat /etc/ssh/ssh_host_ed25519_key
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/timezone.py b/tests/cloud_tests/testcases/modules/timezone.py
deleted file mode 100644
index 654fa53d..00000000
--- a/tests/cloud_tests/testcases/modules/timezone.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestTimezone(base.CloudTestCase):
- """Test timezone module."""
-
- def test_timezone(self):
- """Test date prints correct timezone."""
- out = self.get_data_file('timezone')
- self.assertEqual('HDT', out.rstrip())
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/timezone.yaml b/tests/cloud_tests/testcases/modules/timezone.yaml
deleted file mode 100644
index 5112aa9f..00000000
--- a/tests/cloud_tests/testcases/modules/timezone.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-#
-# Set system timezone
-#
-required_features:
- - daylight_time
-cloud_config: |
- #cloud-config
- timezone: US/Aleutian
-collect_scripts:
- timezone: |
- #!/bin/bash
- # date will convert this to system's configured time zone.
- # use a static date to avoid dealing with daylight savings.
- date "+%Z" --date="Thu, 03 Nov 2016 00:47:00 -0400"
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/user_groups.py b/tests/cloud_tests/testcases/modules/user_groups.py
deleted file mode 100644
index 4067348d..00000000
--- a/tests/cloud_tests/testcases/modules/user_groups.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestUserGroups(base.CloudTestCase):
- """Example cloud-config test."""
-
- def test_group_ubuntu(self):
- """Test ubuntu group exists."""
- out = self.get_data_file('group_ubuntu')
- self.assertRegex(out, r'ubuntu:x:[0-9]{4}:')
-
- def test_group_cloud_users(self):
- """Test cloud users group exists."""
- out = self.get_data_file('group_cloud_users')
- self.assertRegex(out, r'cloud-users:x:[0-9]{4}:barfoo')
-
- def test_user_ubuntu(self):
- """Test ubuntu user exists."""
- out = self.get_data_file('user_ubuntu')
- self.assertRegex(
- out, r'ubuntu:x:[0-9]{4}:[0-9]{4}:Ubuntu:/home/ubuntu:/bin/bash')
-
- def test_user_foobar(self):
- """Test foobar user exists."""
- out = self.get_data_file('user_foobar')
- self.assertRegex(
- out, r'foobar:x:[0-9]{4}:[0-9]{4}:Foo B. Bar:/home/foobar:')
-
- def test_user_barfoo(self):
- """Test barfoo user exists."""
- out = self.get_data_file('user_barfoo')
- self.assertRegex(
- out, r'barfoo:x:[0-9]{4}:[0-9]{4}:Bar B. Foo:/home/barfoo:')
-
- def test_user_cloudy(self):
- """Test cloudy user exists."""
- out = self.get_data_file('user_cloudy')
- self.assertRegex(out, r'cloudy:x:[0-9]{3,4}:')
-
- def test_user_root_in_secret(self):
- """Test root user is in 'secret' group."""
- _user, _, groups = self.get_data_file('root_groups').partition(":")
- self.assertIn("secret", groups.split(),
- msg="User root is not in group 'secret'")
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/user_groups.yaml b/tests/cloud_tests/testcases/modules/user_groups.yaml
deleted file mode 100644
index 91b0e281..00000000
--- a/tests/cloud_tests/testcases/modules/user_groups.yaml
+++ /dev/null
@@ -1,55 +0,0 @@
-#
-# Create groups and users with various options
-#
-required_features:
- - ubuntu_user
-cloud_config: |
- #cloud-config
- # Add groups to the system
- groups:
- - secret: [root]
- - cloud-users
-
- # Add users to the system. Users are added after groups are added.
- users:
- - default
- - name: foobar
- gecos: Foo B. Bar
- primary_group: foobar
- groups: users
- expiredate: '2038-01-19'
- lock_passwd: false
- passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/
- - name: barfoo
- gecos: Bar B. Foo
- sudo: ALL=(ALL) NOPASSWD:ALL
- groups: [cloud-users, secret]
- lock_passwd: true
- - name: cloudy
- gecos: Magic Cloud App Daemon User
- inactive: '5'
- system: true
-collect_scripts:
- group_ubuntu: |
- #!/bin/bash
- getent group ubuntu
- group_cloud_users: |
- #!/bin/bash
- getent group cloud-users
- user_ubuntu: |
- #!/bin/bash
- getent passwd ubuntu
- user_foobar: |
- #!/bin/bash
- getent passwd foobar
- user_barfoo: |
- #!/bin/bash
- getent passwd barfoo
- user_cloudy: |
- #!/bin/bash
- getent passwd cloudy
- root_groups: |
- #!/bin/bash
- groups root
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/write_files.py b/tests/cloud_tests/testcases/modules/write_files.py
deleted file mode 100644
index 526a2ebd..00000000
--- a/tests/cloud_tests/testcases/modules/write_files.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""cloud-init Integration Test Verify Script."""
-from tests.cloud_tests.testcases import base
-
-
-class TestWriteFiles(base.CloudTestCase):
- """Example cloud-config test."""
-
- def test_b64(self):
- """Test b64 encoded file reads as ascii."""
- out = self.get_data_file('file_b64')
- self.assertIn('ASCII text', out)
-
- def test_binary(self):
- """Test binary file reads as executable."""
- out = self.get_data_file('file_binary').strip()
- md5 = "3801184b97bb8c6e63fa0e1eae2920d7"
- sha256 = ("2c791c4037ea5bd7e928d6a87380f8ba7a803cd83d"
- "5e4f269e28f5090f0f2c9a")
- self.assertIn(out, (md5 + " -", sha256 + " -"))
-
- def test_gzip(self):
- """Test gzip file shows up as a shell script."""
- out = self.get_data_file('file_gzip')
- self.assertIn('POSIX shell script, ASCII text executable', out)
-
- def test_text(self):
- """Test text shows up as ASCII text."""
- out = self.get_data_file('file_text')
- self.assertIn('ASCII text', out)
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/write_files.yaml b/tests/cloud_tests/testcases/modules/write_files.yaml
deleted file mode 100644
index cc7ea4bd..00000000
--- a/tests/cloud_tests/testcases/modules/write_files.yaml
+++ /dev/null
@@ -1,53 +0,0 @@
-#
-# Write various file types
-#
-# NOTE: on trusty 'file' has an output formatting error for binary files and
-# has 2 spaces in 'LSB executable', which causes a failure here
-#
-# NOTE: the binary data can be any binary data, not only executables,
-#       and can be generated with the base64 command, for example:
-#       $ base64 < hello > hello.txt
-#       and decoded back by running:
-#       $ base64 -d < hello.txt > hello
-#
-required_features:
- - no_file_fmt_e
-cloud_config: |
- #cloud-config
- write_files:
- - encoding: b64
- content: CiMgVGhpcyBmaWxlIGNvbnRyb2xzIHRoZSBzdGF0ZSBvZiBTRUxpbnV4
- owner: root:root
- path: /root/file_b64
- permissions: '0644'
- - content: |
- # My new /root/file_text
-
- SMBDOPTIONS="-D"
- path: /root/file_text
- - content: !!binary |
- /Z/xrHR4WINT0UNoKPQKbuovp6+Js+JK
- path: /root/file_binary
- permissions: '0555'
- - encoding: gzip
- content: !!binary |
- H4sIAIDb/U8C/1NW1E/KzNMvzuBKTc7IV8hIzcnJVyjPL8pJ4QIA6N+MVxsAAAA=
- path: /root/file_gzip
- permissions: '0755'
-collect_scripts:
- file_b64: |
- #!/bin/bash
- file /root/file_b64
- file_text: |
- #!/bin/bash
- file /root/file_text
- file_binary: |
- #!/bin/bash
- for hasher in md5sum sha256sum; do
- $hasher </root/file_binary && break
- done
- file_gzip: |
- #!/bin/bash
- file /root/file_gzip
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/util.py b/tests/cloud_tests/util.py
deleted file mode 100644
index 7dcccbdd..00000000
--- a/tests/cloud_tests/util.py
+++ /dev/null
@@ -1,532 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Utilities for re-use across integration tests."""
-
-import base64
-import copy
-import glob
-import multiprocessing
-import os
-import random
-import shlex
-import shutil
-import string
-import subprocess
-import tempfile
-import time
-import yaml
-from contextlib import contextmanager
-
-from cloudinit import subp
-from cloudinit import util as c_util
-from tests.cloud_tests import LOG
-
-OS_FAMILY_MAPPING = {
- 'debian': ['debian', 'ubuntu'],
- 'redhat': ['centos', 'rhel', 'fedora'],
- 'gentoo': ['gentoo'],
- 'freebsd': ['freebsd'],
- 'suse': ['sles'],
- 'arch': ['arch'],
-}
-
-
-def list_test_data(data_dir):
- """Find all tests with test data available in data_dir.
-
- @param data_dir: should contain <platforms>/<os_name>/<testnames>/<data>
- @return_value: {<platform>: {<os_name>: [<testname>]}}
- """
- if not os.path.isdir(data_dir):
- raise ValueError("bad data dir")
-
- res = {}
- for platform in os.listdir(data_dir):
- if not os.path.isdir(os.path.join(data_dir, platform)):
- continue
-
- res[platform] = {}
- for os_name in os.listdir(os.path.join(data_dir, platform)):
- res[platform][os_name] = [
- os.path.sep.join(f.split(os.path.sep)[-2:]) for f in
- glob.glob(os.sep.join((data_dir, platform, os_name, '*/*')))]
-
- LOG.debug('found test data: %s\n', res)
- return res
-
-
-def gen_instance_name(prefix='cloud-test', image_desc=None, use_desc=None,
- max_len=63, delim='-', max_tries=16, used_list=None,
- valid=string.ascii_lowercase + string.digits):
- """Generate an unique name for a test instance.
-
- @param prefix: name prefix, defaults to cloud-test, default should be left
- @param image_desc: short string (len <= 16) with image desc
- @param use_desc: short string (len <= 30) with usage desc
- @param max_len: maximum name length, defaults to 63 chars
- @param delim: delimiter to use between tokens
- @param max_tries: maximum tries to find a unique name before giving up
- @param used_list: already used names, or none to not check
- @param valid: string of valid characters for name
- @return_value: valid, unused name, may raise StopIteration
- """
- unknown = 'unknown'
-
- def join(*args):
- """Join args with delim."""
- return delim.join(args)
-
- def fill(*args):
- """Join name elems and fill rest with random data."""
- name = join(*args)
- num = max_len - len(name) - len(delim)
- return join(name, ''.join(random.choice(valid) for _ in range(num)))
-
- def clean(elem, max_len):
- """Filter bad characters out of elem and trim to length."""
- elem = elem.lower()[:max_len] if elem else unknown
- return ''.join(c if c in valid else delim for c in elem)
-
- return next(name for name in
- (fill(prefix, clean(image_desc, 16), clean(use_desc, 30))
- for _ in range(max_tries))
- if not used_list or name not in used_list)
-
-
-def sorted_unique(iterable, key=None, reverse=False):
- """Create unique sorted list.
-
- @param iterable: the data structure to sort
- @param key: optional key function to sort by
- @param reverse: if true, sort in reverse order
- @return_value: a sorted list of unique items in iterable
- """
- return sorted(set(iterable), key=key, reverse=reverse)
-
-
-def get_os_family(os_name):
- """Get os family type for os_name.
-
- @param os_name: name of os
- @return_value: family name for os_name
- """
- return next((k for k, v in OS_FAMILY_MAPPING.items()
- if os_name.lower() in v), None)
-
-
-def current_verbosity():
- """Get verbosity currently in effect from log level.
-
- @return_value: verbosity, 0-2, 2=verbose, 0=quiet
- """
- return max(min(3 - int(LOG.level / 10), 2), 0)
-
-
-@contextmanager
-def emit_dots_on_travis():
- """
- A context manager that emits a dot every 10 seconds if running on Travis.
-
- Travis will kill jobs that don't emit output for a certain amount of time.
- This context manager spins up a background process which will emit a dot to
- stdout every 10 seconds to avoid being killed.
-
- It should be wrapped selectively around operations that are known to take a
- long time.
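-
- Illustrative usage (the wrapped call name here is hypothetical):
-
- with emit_dots_on_travis():
- collect_long_running_data()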
- """
- if os.environ.get('TRAVIS') != "true":
- # If we aren't on Travis, don't do anything.
- yield
- return
-
- def emit_dots():
- while True:
- print(".")
- time.sleep(10)
-
- dot_process = multiprocessing.Process(target=emit_dots)
- dot_process.start()
- try:
- yield
- finally:
- dot_process.terminate()
-
-
-def is_writable_dir(path):
- """Make sure dir is writable.
-
- @param path: path to determine if writable
- @return_value: boolean with result
- """
- try:
- c_util.ensure_dir(path)
- os.remove(tempfile.mkstemp(dir=os.path.abspath(path))[1])
- except (IOError, OSError):
- return False
- return True
-
-
-def is_clean_writable_dir(path):
- """Make sure dir is empty and writable, creating it if it does not exist.
-
- @param path: path to check
- @return_value: True if the dir is empty and writable, else False
- """
- path = os.path.abspath(path)
- if not (is_writable_dir(path) and len(os.listdir(path)) == 0):
- return False
- return True
-
-
-def configure_yaml():
- """Clean yaml."""
- yaml.add_representer(str, (lambda dumper, data: dumper.represent_scalar(
- 'tag:yaml.org,2002:str', data, style='|' if '\n' in data else '')))
-
-
-def yaml_format(data, content_type=None):
- """Format data as yaml.
-
- @param data: data to dump
- @param content_type: if specified, add a '#<content_type>' header to the data
- @return_value: yaml string
- """
- configure_yaml()
- content_type = (
- '#{}\n'.format(content_type.strip('#\n')) if content_type else '')
- return content_type + yaml.dump(data, indent=2, default_flow_style=False)
-
-
-def yaml_dump(data, path):
- """Dump data to path in yaml format."""
- c_util.write_file(os.path.abspath(path), yaml_format(data), omode='w')
-
-
-def merge_results(data, path):
- """Handle merging results from collect phase and verify phase."""
- current = {}
- if os.path.exists(path):
- with open(path, 'r') as fp:
- current = c_util.load_yaml(fp.read())
- current.update(data)
- yaml_dump(current, path)
-
-
-def rel_files(basedir):
- """List of files under directory by relative path, not including dirs.
-
- @param basedir: directory to search
- @return_value: list of relative paths
- """
- basedir = os.path.normpath(basedir)
- return [path[len(basedir) + 1:] for path in
- glob.glob(os.path.join(basedir, '**'), recursive=True)
- if not os.path.isdir(path)]
-
-
-def flat_tar(output, basedir, owner='root', group='root'):
- """Create a flat tar archive (no leading ./) from basedir.
-
- @param output: output tar file to write
- @param basedir: base directory for archive
- @param owner: owner of archive files
- @param group: group archive files belong to
- @return_value: none
- """
- subp.subp(['tar', 'cf', output, '--owner', owner, '--group', group,
- '-C', basedir] + rel_files(basedir), capture=True)
-
-
-def parse_conf_list(entries, valid=None, boolean=False):
- """Parse config in a list of strings in key=value format.
-
- @param entries: list of key=value strings
- @param valid: list of valid keys in result, return None if invalid input
- @param boolean: if true, then interpret all values as booleans
- @return_value: dict of configuration or None if invalid
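-
- Example (illustrative values): parse_conf_list(['os=xenial', 'n=5'])
- returns {'os': 'xenial', 'n': '5'}; with boolean=True each value is
- True only when it equals 'true' (case-insensitive).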
- """
- res = {key: value.lower() == 'true' if boolean else value
- for key, value in (i.split('=') for i in entries)}
- return res if not valid or all(k in valid for k in res.keys()) else None
-
-
-def update_args(args, updates, preserve_old=True):
- """Update cmdline arguments from a dictionary.
-
- @param args: cmdline arguments
- @param updates: dictionary of {arg_name: new_value} mappings
- @param preserve_old: if true, create a deep copy of args before updating
- @return_value: updated cmdline arguments
- """
- args = copy.deepcopy(args) if preserve_old else args
- if updates:
- vars(args).update(updates)
- return args
-
-
-def update_user_data(user_data, updates, dump_to_yaml=True):
- """Update user_data from dictionary.
-
- @param user_data: user data as yaml string or dict
- @param updates: dictionary to merge with user data
- @param dump_to_yaml: return as yaml dumped string if true
- @return_value: updated user data, as yaml string if dump_to_yaml is true
- """
- user_data = (c_util.load_yaml(user_data)
- if isinstance(user_data, str) else copy.deepcopy(user_data))
- user_data.update(updates)
- return (yaml_format(user_data, content_type='cloud-config')
- if dump_to_yaml else user_data)
-
-
-def shell_safe(cmd):
- """Produce string safe shell string.
-
- Create a string that can be passed to:
- set -- <string>
- to produce the same array that cmd represents.
-
- Internally we rely on 'getopt', which knows how to quote strings so
- they are safe for the shell. This implementation could be changed to
- be pure python. It is just a matter of correctly escaping or quoting
- characters like: ' " ^ & $ ; ( ) ...
-
- @param cmd: command as a list
- """
- out = subprocess.check_output(
- ["getopt", "--shell", "sh", "--options", "", "--", "--"] + list(cmd))
- # out contains ' -- <data>\n'. drop the ' -- ' and the '\n'
- return out.decode()[4:-1]
-
-
-def shell_pack(cmd):
- """Return a string that can shuffled through 'sh' and execute cmd.
-
- In Python subprocess terms:
- check_output(cmd) == check_output(shell_pack(cmd), shell=True)
-
- @param cmd: list or string of command to pack up
- """
-
- if isinstance(cmd, str):
- cmd = [cmd]
- else:
- cmd = list(cmd)
-
- stuffed = shell_safe(cmd)
- # for whatever reason b64encode returns bytes when it is clearly
- # representable as a string by nature of being base64 encoded.
- b64 = base64.b64encode(stuffed.encode()).decode()
- return 'eval set -- "$(echo %s | base64 --decode)" && exec "$@"' % b64
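-
-
-# Illustrative use of shell_pack (a sketch mirroring the docstring above;
-# the command list here is made up):
-#   packed = shell_pack(['ls', '-l', '/tmp'])
-#   subprocess.check_output(packed, shell=True)
-# returns the same bytes as subprocess.check_output(['ls', '-l', '/tmp']).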
-
-
-def shell_quote(cmd):
- if isinstance(cmd, (tuple, list)):
- return ' '.join([shlex.quote(x) for x in cmd])
- return shlex.quote(cmd)
-
-
-class TargetBase(object):
- _tmp_count = 0
-
- def execute(self, command, stdin=None, env=None,
- rcs=None, description=None):
- """Execute command in instance, recording output, error and exit code.
-
- Assumes functional networking and execution as root with the
- target filesystem being available at /.
-
- @param command: the command to execute as root inside the image
- if command is a string, then it will be executed as:
- ['sh', '-c', command]
- @param stdin: bytes content for standard in
- @param env: environment variables
- @param rcs: return codes.
- None (default): non-zero exit code will raise exception.
- False: any is allowed (no exception raised).
- list of int: any rc not in the list will raise exception.
- @param description: purpose of command
- @return_value: tuple containing stdout data, stderr data, exit code
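-
- Illustrative call (the command and allowed codes are made up):
- out, err, rc = target.execute('grep foo /etc/bar', rcs=(0, 1))
- would accept exit codes 0 and 1 without raising.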
- """
- if isinstance(command, str):
- command = ['sh', '-c', command]
-
- if rcs is None:
- rcs = (0,)
-
- if description:
- LOG.debug('executing "%s"', description)
- else:
- LOG.debug("executing command: %s", shell_quote(command))
-
- out, err, rc = self._execute(command=command, stdin=stdin, env=env)
-
- # False means accept anything.
- if (rcs is False or rc in rcs):
- return out, err, rc
-
- raise InTargetExecuteError(out, err, rc, command, description)
-
- def _execute(self, command, stdin=None, env=None):
- """Execute command in inside, return stdout, stderr and exit code.
-
- Assumes functional networking and execution as root with the
- target filesystem being available at /.
-
- @param stdin: bytes content for standard in
- @param env: environment variables
- @return_value: tuple containing stdout data, stderr data, exit code
-
- This is intended to be implemented by the Image or Instance.
- Many callers will use the higher level 'execute'."""
- raise NotImplementedError("_execute must be implemented by subclass.")
-
- def read_data(self, remote_path, decode=False):
- """Read data from instance filesystem.
-
- @param remote_path: path in instance
- @param decode: decode data before returning.
- @return_value: content of remote_path as bytes if 'decode' is False,
- and as string if 'decode' is True.
- """
- # when sh is invoked with '-c', then the first argument is "$0"
- # which is commonly understood as the "program name".
- # 'read_data' is the program name, and 'remote_path' is '$1'
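- # For illustration, sh -c 'echo "$1"' read_data /some/path prints
- # '/some/path': 'read_data' fills "$0" and the path becomes "$1".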
- stdout, _stderr, rc = self._execute(
- ["sh", "-c", 'exec cat "$1"', 'read_data', remote_path])
- if rc != 0:
- raise RuntimeError("Failed to read file '%s'" % remote_path)
-
- if decode:
- return stdout.decode()
- return stdout
-
- def write_data(self, remote_path, data):
- """Write data to instance filesystem.
-
- @param remote_path: path in instance
- @param data: data to write in bytes
- """
- # when sh is invoked with '-c', then the first argument is "$0"
- # which is commonly understood as the "program name".
- # 'write_data' is the program name, and 'remote_path' is '$1'
- _, _, rc = self._execute(
- ["sh", "-c", 'exec cat >"$1"', 'write_data', remote_path],
- stdin=data)
-
- if rc != 0:
- raise RuntimeError("Failed to write to '%s'" % remote_path)
- return
-
- def pull_file(self, remote_path, local_path):
- """Copy file at 'remote_path', from instance to 'local_path'.
-
- @param remote_path: path on remote instance
- @param local_path: path on local instance
- """
- with open(local_path, 'wb') as fp:
- fp.write(self.read_data(remote_path))
-
- def push_file(self, local_path, remote_path):
- """Copy file at 'local_path' to instance at 'remote_path'.
-
- @param local_path: path on local instance
- @param remote_path: path on remote instance"""
- with open(local_path, "rb") as fp:
- self.write_data(remote_path, data=fp.read())
-
- def run_script(self, script, rcs=None, description=None):
- """Run script in target and return stdout.
-
- @param script: script contents
- @param rcs: allowed return codes from script
- @param description: purpose of script
- @return_value: stdout from script
- """
- # Just write to a file, add execute, run it, then remove it.
- shblob = '; '.join((
- 'set -e',
- 's="$1"',
- 'shift',
- 'cat > "$s"',
- 'trap "rm -f $s" EXIT',
- 'chmod +x "$s"',
- '"$s" "$@"'))
- return self.execute(
- ['sh', '-c', shblob, 'runscript', self.tmpfile()],
- stdin=script, description=description, rcs=rcs)
-
- def tmpfile(self):
- """Get a tmp file in the target.
-
- @return_value: path to new file in target
- """
- path = "/tmp/%s-%04d" % (type(self).__name__, self._tmp_count)
- self._tmp_count += 1
- return path
-
-
-class InTargetExecuteError(subp.ProcessExecutionError):
- """Error type for in target commands that fail."""
-
- default_desc = 'Unexpected error while running command.'
-
- def __init__(self, stdout, stderr, exit_code, cmd, description=None,
- reason=None):
- """Init error and parent error class."""
- super(InTargetExecuteError, self).__init__(
- stdout=stdout, stderr=stderr, exit_code=exit_code,
- cmd=shell_quote(cmd),
- description=description if description else self.default_desc,
- reason=reason)
-
-
-class PlatformError(IOError):
- """Error type for platform errors."""
-
- default_desc = 'unexpected error in platform.'
-
- def __init__(self, operation, description=None):
- """Init error and parent error class."""
- description = description if description else self.default_desc
-
- message = '%s: %s' % (operation, description)
- IOError.__init__(self, message)
-
-
-def mkdtemp(prefix='cloud_test_data'):
- return tempfile.mkdtemp(prefix=prefix)
-
-
-class TempDir(object):
- """Configurable temporary directory like tempfile.TemporaryDirectory."""
-
- def __init__(self, tmpdir=None, preserve=False, prefix='cloud_test_data_'):
- """Initialize.
-
- @param tmpdir: directory to use as tempdir
- @param preserve: if true, always preserve data on exit
- @param prefix: prefix to use for tempfile name
- """
- self.tmpdir = tmpdir
- self.preserve = preserve
- self.prefix = prefix
-
- def __enter__(self):
- """Create tempdir.
-
- @return_value: tempdir path
- """
- if not self.tmpdir:
- self.tmpdir = mkdtemp(prefix=self.prefix)
- LOG.debug('using tmpdir: %s', self.tmpdir)
- return self.tmpdir
-
- def __exit__(self, etype, value, trace):
- """Destroy tempdir if no errors occurred."""
- if etype or self.preserve:
- LOG.info('leaving data in %s', self.tmpdir)
- else:
- shutil.rmtree(self.tmpdir)
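A brief usage sketch of the context manager above (illustrative paths, assuming the TempDir class as defined in this file): data is removed on a clean exit and preserved when 'preserve' is set or the block raises.

with TempDir() as tmpdir:
    print("collecting into", tmpdir)   # removed on clean exit

with TempDir(tmpdir="/tmp/keep_me", preserve=True) as tmpdir:
    print("collecting into", tmpdir)   # always left in place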
-
-# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/verify.py b/tests/cloud_tests/verify.py
deleted file mode 100644
index 0295af40..00000000
--- a/tests/cloud_tests/verify.py
+++ /dev/null
@@ -1,149 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Verify test results."""
-
-import os
-import unittest
-
-from tests.cloud_tests import (config, LOG, util, testcases)
-
-
-def verify_data(data_dir, platform, os_name, tests):
- """Verify test data is correct.
-
- @param data_dir: top level directory for all tests
- @param platform: The platform name for this test data (e.g. lxd)
- @param os_name: The operating system under test (xenial, artful, etc.).
- @param tests: list of test names
- @return_value: {<test_name>: {passed: True/False, failures: []}}
- """
- base_dir = os.sep.join((data_dir, platform, os_name))
- runner = unittest.TextTestRunner(verbosity=util.current_verbosity())
- res = {}
- for test_name in tests:
- LOG.debug('verifying test data for %s', test_name)
-
- # get cloudconfig for test
- test_conf = config.load_test_config(test_name)
- test_module = config.name_to_module(test_name)
- cloud_conf = test_conf['cloud_config']
-
- # load script outputs
- data = {'platform': platform, 'os_name': os_name}
- test_dir = os.path.join(base_dir, test_name)
- for script_name in os.listdir(test_dir):
- with open(os.path.join(test_dir, script_name), 'rb') as fp:
- data[script_name] = fp.read()
-
- # get test suite and launch tests
- suite = testcases.get_suite(test_module, data, cloud_conf)
- suite_results = runner.run(suite)
- res[test_name] = {
- 'passed': suite_results.wasSuccessful(),
- 'failures': [{'module': type(test_class).__base__.__module__,
- 'class': type(test_class).__base__.__name__,
- 'function': str(test_class).split()[0],
- 'error': trace.splitlines()[-1],
- 'traceback': trace, }
- for test_class, trace in suite_results.failures]
- }
-
- for failure in res[test_name]['failures']:
- LOG.warning('test case: %s failed %s.%s with: %s',
- test_name, failure['class'], failure['function'],
- failure['error'])
-
- return res
-
-
-def format_test_failures(test_result):
- """Return a human-readable printable format of test failures."""
- if not test_result['failures']:
- return ''
- failure_hdr = ' test failures:'
- failure_fmt = ' * {module}.{class}.{function}\n '
- output = []
- for failure in test_result['failures']:
- if not output:
- output = [failure_hdr]
- msg = failure_fmt.format(**failure)
- if failure.get('error'):
- msg += failure['error']
- else:
- msg += failure.get('traceback', '')
- output.append(msg)
- return '\n'.join(output)
-
-
-def format_results(res):
- """Return human-readable results as a string"""
- platform_hdr = 'Platform: {platform}'
- distro_hdr = ' Distro: {distro}'
- distro_summary_fmt = (
- ' test modules passed:{passed} tests failed:{failed}')
- output = ['']
- counts = {}
- for platform, platform_data in res.items():
- output.append(platform_hdr.format(platform=platform))
- counts[platform] = {}
- for distro, distro_data in platform_data.items():
- distro_failure_output = []
- output.append(distro_hdr.format(distro=distro))
- counts[platform][distro] = {'passed': 0, 'failed': 0}
- for _, test_result in distro_data.items():
- if test_result['passed']:
- counts[platform][distro]['passed'] += 1
- else:
- counts[platform][distro]['failed'] += len(
- test_result['failures'])
- failure_output = format_test_failures(test_result)
- if failure_output:
- distro_failure_output.append(failure_output)
- output.append(
- distro_summary_fmt.format(**counts[platform][distro]))
- if distro_failure_output:
- output.extend(distro_failure_output)
- return '\n'.join(output)
-
-
-def verify(args):
- """Verify test data.
-
- @param args: directory of test data
- @return_value: 0 for success, or number of failed tests
- """
- failed = 0
- res = {}
-
- # find test data
- tests = util.list_test_data(args.data_dir)
-
- for platform in tests.keys():
- res[platform] = {}
- for os_name in tests[platform].keys():
- test_name = "platform='{}', os='{}'".format(platform, os_name)
- LOG.info('test: %s verifying test data', test_name)
-
- # run test
- res[platform][os_name] = verify_data(
- args.data_dir, platform, os_name,
- tests[platform][os_name])
-
- # handle results
- fail_list = [k for k, v in res[platform][os_name].items()
- if not v.get('passed')]
- if len(fail_list) == 0:
- LOG.info('test: %s passed all tests', test_name)
- else:
- LOG.warning('test: %s failed %s tests', test_name,
- len(fail_list))
- failed += len(fail_list)
-
- # dump results
- LOG.debug('\n---- Verify summarized results:\n%s', format_results(res))
- if args.result:
- util.merge_results({'verify': res}, args.result)
-
- return failed
-
-# vi: ts=4 expandtab
diff --git a/tests/configs/sample1.yaml b/tests/configs/sample1.yaml
deleted file mode 100644
index ae935cc0..00000000
--- a/tests/configs/sample1.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
-#cloud-config
-#apt_update: false
-#apt_upgrade: true
-packages: [ bzr, pastebinit, ubuntu-dev-tools, ccache, bzr-builddeb, vim-nox, git-core, lftp ]
-
-#disable_root: False
-
-# mounts:
-# - [ ephemeral0, /mnt ]
-# - [ swap, none, swap, sw, 0, 0 ]
-
-ssh_import_id: [smoser ]
-
-#!/bin/sh
-
-output: {all: '| tee -a /var/log/cloud-init-output.log'}
-
-sm_misc:
- - &user_setup |
- set -x; exec > ~/user_setup.log 2>&1
- echo "starting at $(date -R)"
- echo "set -o vi" >> ~/.bashrc
- cat >> ~/.profile <<"EOF"
- export EDITOR=vi
- export DEB_BUILD_OPTIONS=parallel=4
- export PATH=/usr/lib/ccache:$PATH
- EOF
-
- mkdir ~/bin
- chmod 755 ~/bin
- cat > ~/bin/mdebuild <<"EOF"
- #!/bin/sh
- exec debuild --prepend-path /usr/lib/ccache "$@"
- EOF
- chmod 755 ~/bin/*
-
- #byobu-launcher-install
- byobu-ctrl-a screen 2>&1 || :
-
- echo "pinging 8.8.8.8"
- ping -c 4 8.8.8.8
-
-runcmd:
- - [ sudo, -Hu, ubuntu, sh, -c, '[ -e /var/log/cloud-init.log ] || exit 0; grep "cloud-init.*running" /var/log/cloud-init.log > ~/runcmd.log' ]
- - [ sudo, -Hu, ubuntu, sh, -c, 'read up sleep < /proc/uptime; echo $(date): runcmd up at $up | tee -a ~/runcmd.log' ]
- - [ sudo, -Hu, ubuntu, sh, -c, *user_setup ]
-
-
-byobu_by_default: user
diff --git a/tests/data/netinfo/sample-ipaddrshow-json b/tests/data/netinfo/sample-ipaddrshow-json
new file mode 100644
index 00000000..8f6a430c
--- /dev/null
+++ b/tests/data/netinfo/sample-ipaddrshow-json
@@ -0,0 +1,91 @@
+[
+ {
+ "ifindex": 1,
+ "ifname": "lo",
+ "flags": [
+ "LOOPBACK",
+ "UP",
+ "LOWER_UP"
+ ],
+ "mtu": 65536,
+ "qdisc": "noqueue",
+ "operstate": "UNKNOWN",
+ "group": "default",
+ "txqlen": 1000,
+ "link_type": "loopback",
+ "address": "00:00:00:00:00:00",
+ "broadcast": "00:00:00:00:00:00",
+ "addr_info": [
+ {
+ "family": "inet",
+ "local": "127.0.0.1",
+ "prefixlen": 8,
+ "scope": "host",
+ "label": "lo",
+ "valid_life_time": 4294967295,
+ "preferred_life_time": 4294967295
+ },
+ {
+ "family": "inet6",
+ "local": "::1",
+ "prefixlen": 128,
+ "scope": "host",
+ "valid_life_time": 4294967295,
+ "preferred_life_time": 4294967295
+ }
+ ]
+ },
+ {
+ "ifindex": 23,
+ "link_index": 24,
+ "ifname": "enp0s25",
+ "flags": [
+ "BROADCAST",
+ "MULTICAST",
+ "UP",
+ "LOWER_UP"
+ ],
+ "mtu": 1500,
+ "qdisc": "noqueue",
+ "operstate": "UP",
+ "group": "default",
+ "txqlen": 1000,
+ "link_type": "ether",
+ "address": "50:7b:9d:2c:af:91",
+ "broadcast": "ff:ff:ff:ff:ff:ff",
+ "link_netnsid": 0,
+ "addr_info": [
+ {
+ "family": "inet",
+ "local": "192.168.2.18",
+ "prefixlen": 24,
+ "metric": 100,
+ "broadcast": "192.168.2.255",
+ "scope": "global",
+ "dynamic": true,
+ "label": "enp0s25",
+ "valid_life_time": 2339,
+ "preferred_life_time": 2339
+ },
+ {
+ "family": "inet6",
+ "local": "fe80::7777:2222:1111:eeee",
+ "prefixlen": 64,
+ "scope": "global",
+ "dynamic": true,
+ "mngtmpaddr": true,
+ "noprefixroute": true,
+ "valid_life_time": 6823,
+ "preferred_life_time": 3223
+ },
+ {
+ "family": "inet6",
+ "local": "fe80::8107:2b92:867e:f8a6",
+ "prefixlen": 64,
+ "scope": "link",
+ "valid_life_time": 4294967295,
+ "preferred_life_time": 4294967295
+ }
+ ]
+ }
+]
diff --git a/tests/data/netinfo/sample-ipaddrshow-json-down b/tests/data/netinfo/sample-ipaddrshow-json-down
new file mode 100644
index 00000000..7ad5dde0
--- /dev/null
+++ b/tests/data/netinfo/sample-ipaddrshow-json-down
@@ -0,0 +1,57 @@
+[
+ {
+ "ifindex": 1,
+ "ifname": "lo",
+ "flags": [
+ "LOOPBACK",
+ "UP",
+ "LOWER_UP"
+ ],
+ "mtu": 65536,
+ "qdisc": "noqueue",
+ "operstate": "UNKNOWN",
+ "group": "default",
+ "txqlen": 1000,
+ "link_type": "loopback",
+ "address": "00:00:00:00:00:00",
+ "broadcast": "00:00:00:00:00:00",
+ "addr_info": [
+ {
+ "family": "inet",
+ "local": "127.0.0.1",
+ "prefixlen": 8,
+ "scope": "host",
+ "label": "lo",
+ "valid_life_time": 4294967295,
+ "preferred_life_time": 4294967295
+ },
+ {
+ "family": "inet6",
+ "local": "::1",
+ "prefixlen": 128,
+ "scope": "host",
+ "valid_life_time": 4294967295,
+ "preferred_life_time": 4294967295
+ }
+ ]
+ },
+ {
+ "ifindex": 23,
+ "link_index": 24,
+ "ifname": "eth0",
+ "flags": [
+ "BROADCAST",
+ "MULTICAST"
+ ],
+ "mtu": 1500,
+ "qdisc": "noqueue",
+ "operstate": "DOWN",
+ "group": "default",
+ "txqlen": 1000,
+ "link_type": "ether",
+ "address": "00:16:3e:de:51:a6",
+ "broadcast": "ff:ff:ff:ff:ff:ff",
+ "link_netnsid": 0,
+ "addr_info": []
+ }
+]
diff --git a/tests/data/netinfo/sample-ipaddrshow-output b/tests/data/netinfo/sample-ipaddrshow-output
index b2fa2672..2aa3f90c 100644
--- a/tests/data/netinfo/sample-ipaddrshow-output
+++ b/tests/data/netinfo/sample-ipaddrshow-output
@@ -4,10 +4,9 @@
inet6 ::1/128 scope host \ valid_lft forever preferred_lft forever
2: enp0s25: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP mode DEFAULT group default qlen 1000
link/ether 50:7b:9d:2c:af:91 brd ff:ff:ff:ff:ff:ff
- inet 192.168.2.18/24 brd 192.168.2.255 scope global dynamic enp0s25
+ inet 192.168.2.18/24 metric 100 brd 192.168.2.255 scope global dynamic enp0s25
valid_lft 84174sec preferred_lft 84174sec
inet6 fe80::7777:2222:1111:eeee/64 scope global
valid_lft forever preferred_lft forever
inet6 fe80::8107:2b92:867e:f8a6/64 scope link
valid_lft forever preferred_lft forever
-
diff --git a/tests/data/old_pickles/trusty-14.04.1-0.7.5.pkl b/tests/data/old_pickles/trusty-14.04.1-0.7.5.pkl
new file mode 100644
index 00000000..c7d7844b
--- /dev/null
+++ b/tests/data/old_pickles/trusty-14.04.1-0.7.5.pkl
@@ -0,0 +1,504 @@
+ccopy_reg
+_reconstructor
+p1
+(ccloudinit.sources.DataSourceNoCloud
+DataSourceNoCloudNet
+p2
+c__builtin__
+object
+p3
+NtRp4
+(dp5
+S'paths'
+p6
+g1
+(ccloudinit.helpers
+Paths
+p7
+g3
+NtRp8
+(dp9
+S'lookups'
+p10
+(dp11
+S'cloud_config'
+p12
+S'cloud-config.txt'
+p13
+sS'userdata'
+p14
+S'user-data.txt.i'
+p15
+sS'vendordata'
+p16
+S'vendor-data.txt.i'
+p17
+sS'userdata_raw'
+p18
+S'user-data.txt'
+p19
+sS'boothooks'
+p20
+g20
+sS'scripts'
+p21
+g21
+sS'sem'
+p22
+g22
+sS'data'
+p23
+g23
+sS'vendor_scripts'
+p24
+S'scripts/vendor'
+p25
+sS'handlers'
+p26
+g26
+sS'obj_pkl'
+p27
+S'obj.pkl'
+p28
+sS'vendordata_raw'
+p29
+S'vendor-data.txt'
+p30
+sS'vendor_cloud_config'
+p31
+S'vendor-cloud-config.txt'
+p32
+ssS'template_tpl'
+p33
+S'/etc/cloud/templates/%s.tmpl'
+p34
+sS'cfgs'
+p35
+(dp36
+S'cloud_dir'
+p37
+S'/var/lib/cloud/'
+p38
+sS'templates_dir'
+p39
+S'/etc/cloud/templates/'
+p40
+sS'upstart_dir'
+p41
+S'/etc/init/'
+p42
+ssS'cloud_dir'
+p43
+g38
+sS'datasource'
+p44
+NsS'upstart_conf_d'
+p45
+g42
+sS'boot_finished'
+p46
+S'/var/lib/cloud/instance/boot-finished'
+p47
+sS'instance_link'
+p48
+S'/var/lib/cloud/instance'
+p49
+sS'seed_dir'
+p50
+S'/var/lib/cloud/seed'
+p51
+sbsS'supported_seed_starts'
+p52
+(S'http://'
+S'https://'
+S'ftp://'
+tp53
+sS'sys_cfg'
+p54
+(dp55
+S'output'
+p56
+(dp57
+S'all'
+p58
+S'| tee -a /var/log/cloud-init-output.log'
+p59
+ssS'users'
+p60
+(lp61
+S'default'
+p62
+asS'def_log_file'
+p63
+S'/var/log/cloud-init.log'
+p64
+sS'cloud_final_modules'
+p65
+(lp66
+S'rightscale_userdata'
+p67
+aS'scripts-vendor'
+p68
+aS'scripts-per-once'
+p69
+aS'scripts-per-boot'
+p70
+aS'scripts-per-instance'
+p71
+aS'scripts-user'
+p72
+aS'ssh-authkey-fingerprints'
+p73
+aS'keys-to-console'
+p74
+aS'phone-home'
+p75
+aS'final-message'
+p76
+aS'power-state-change'
+p77
+asS'disable_root'
+p78
+I01
+sS'syslog_fix_perms'
+p79
+S'syslog:adm'
+p80
+sS'log_cfgs'
+p81
+(lp82
+(lp83
+S'[loggers]\nkeys=root,cloudinit\n\n[handlers]\nkeys=consoleHandler,cloudLogHandler\n\n[formatters]\nkeys=simpleFormatter,arg0Formatter\n\n[logger_root]\nlevel=DEBUG\nhandlers=consoleHandler,cloudLogHandler\n\n[logger_cloudinit]\nlevel=DEBUG\nqualname=cloudinit\nhandlers=\npropagate=1\n\n[handler_consoleHandler]\nclass=StreamHandler\nlevel=WARNING\nformatter=arg0Formatter\nargs=(sys.stderr,)\n\n[formatter_arg0Formatter]\nformat=%(asctime)s - %(filename)s[%(levelname)s]: %(message)s\n\n[formatter_simpleFormatter]\nformat=[CLOUDINIT] %(filename)s[%(levelname)s]: %(message)s\n'
+p84
+aS'[handler_cloudLogHandler]\nclass=handlers.SysLogHandler\nlevel=DEBUG\nformatter=simpleFormatter\nargs=("/dev/log", handlers.SysLogHandler.LOG_USER)\n'
+p85
+aa(lp86
+g84
+aS"[handler_cloudLogHandler]\nclass=FileHandler\nlevel=DEBUG\nformatter=arg0Formatter\nargs=('/var/log/cloud-init.log',)\n"
+p87
+aasS'cloud_init_modules'
+p88
+(lp89
+S'migrator'
+p90
+aS'seed_random'
+p91
+aS'bootcmd'
+p92
+aS'write-files'
+p93
+aS'growpart'
+p94
+aS'resizefs'
+p95
+aS'set_hostname'
+p96
+aS'update_hostname'
+p97
+aS'update_etc_hosts'
+p98
+aS'ca-certs'
+p99
+aS'rsyslog'
+p100
+aS'users-groups'
+p101
+aS'ssh'
+p102
+asS'preserve_hostname'
+p103
+I00
+sS'_log'
+p104
+(lp105
+g84
+ag87
+ag85
+asS'datasource_list'
+p106
+(lp107
+S'NoCloud'
+p108
+aS'ConfigDrive'
+p109
+aS'OpenNebula'
+p110
+aS'Azure'
+p111
+aS'AltCloud'
+p112
+aS'OVF'
+p113
+aS'MAAS'
+p114
+aS'GCE'
+p115
+aS'OpenStack'
+p116
+aS'CloudSigma'
+p117
+aS'Ec2'
+p118
+aS'CloudStack'
+p119
+aS'SmartOS'
+p120
+aS'None'
+p121
+asS'vendor_data'
+p122
+(dp123
+S'prefix'
+p124
+(lp125
+sS'enabled'
+p126
+I01
+ssS'cloud_config_modules'
+p127
+(lp128
+S'emit_upstart'
+p129
+aS'disk_setup'
+p130
+aS'mounts'
+p131
+aS'ssh-import-id'
+p132
+aS'locale'
+p133
+aS'set-passwords'
+p134
+aS'grub-dpkg'
+p135
+aS'apt-pipelining'
+p136
+aS'apt-configure'
+p137
+aS'package-update-upgrade-install'
+p138
+aS'landscape'
+p139
+aS'timezone'
+p140
+aS'puppet'
+p141
+aS'chef'
+p142
+aS'salt-minion'
+p143
+aS'mcollective'
+p144
+aS'disable-ec2-metadata'
+p145
+aS'runcmd'
+p146
+aS'byobu'
+p147
+assg14
+Nsg16
+Nsg18
+S'#cloud-config\n{}\n\n'
+p148
+sg29
+S'#cloud-config\n{}\n\n'
+p149
+sS'dsmode'
+p150
+S'net'
+p151
+sS'seed'
+p152
+S'/var/lib/cloud/seed/nocloud-net'
+p153
+sS'cmdline_id'
+p154
+S'ds=nocloud-net'
+p155
+sS'ud_proc'
+p156
+g1
+(ccloudinit.user_data
+UserDataProcessor
+p157
+g3
+NtRp158
+(dp159
+g6
+g8
+sS'ssl_details'
+p160
+(dp161
+sbsg50
+g153
+sS'ds_cfg'
+p162
+(dp163
+sS'distro'
+p164
+g1
+(ccloudinit.distros.ubuntu
+Distro
+p165
+g3
+NtRp166
+(dp167
+S'osfamily'
+p168
+S'debian'
+p169
+sS'_paths'
+p170
+g8
+sS'name'
+p171
+S'ubuntu'
+p172
+sS'_runner'
+p173
+g1
+(ccloudinit.helpers
+Runners
+p174
+g3
+NtRp175
+(dp176
+g6
+g8
+sS'sems'
+p177
+(dp178
+sbsS'_cfg'
+p179
+(dp180
+S'paths'
+p181
+(dp182
+g37
+g38
+sg39
+g40
+sg41
+g42
+ssS'default_user'
+p183
+(dp184
+S'shell'
+p185
+S'/bin/bash'
+p186
+sS'name'
+p187
+S'ubuntu'
+p188
+sS'sudo'
+p189
+(lp190
+S'ALL=(ALL) NOPASSWD:ALL'
+p191
+asS'lock_passwd'
+p192
+I01
+sS'gecos'
+p193
+S'Ubuntu'
+p194
+sS'groups'
+p195
+(lp196
+S'adm'
+p197
+aS'audio'
+p198
+aS'cdrom'
+p199
+aS'dialout'
+p200
+aS'dip'
+p201
+aS'floppy'
+p202
+aS'netdev'
+p203
+aS'plugdev'
+p204
+aS'sudo'
+p205
+aS'video'
+p206
+assS'package_mirrors'
+p207
+(lp208
+(dp209
+S'arches'
+p210
+(lp211
+S'i386'
+p212
+aS'amd64'
+p213
+asS'failsafe'
+p214
+(dp215
+S'security'
+p216
+S'http://security.ubuntu.com/ubuntu'
+p217
+sS'primary'
+p218
+S'http://archive.ubuntu.com/ubuntu'
+p219
+ssS'search'
+p220
+(dp221
+S'security'
+p222
+(lp223
+sS'primary'
+p224
+(lp225
+S'http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/'
+p226
+aS'http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/'
+p227
+aS'http://%(region)s.clouds.archive.ubuntu.com/ubuntu/'
+p228
+assa(dp229
+S'arches'
+p230
+(lp231
+S'armhf'
+p232
+aS'armel'
+p233
+aS'default'
+p234
+asS'failsafe'
+p235
+(dp236
+S'security'
+p237
+S'http://ports.ubuntu.com/ubuntu-ports'
+p238
+sS'primary'
+p239
+S'http://ports.ubuntu.com/ubuntu-ports'
+p240
+ssasS'ssh_svcname'
+p241
+S'ssh'
+p242
+ssbsS'metadata'
+p243
+(dp244
+g150
+g151
+sS'local-hostname'
+p245
+S'trusty-upgrade2'
+p246
+sS'instance-id'
+p247
+S'trusty-upgrade2'
+p248
+ssb. \ No newline at end of file
diff --git a/tests/integration_tests/__init__.py b/tests/integration_tests/__init__.py
new file mode 100644
index 00000000..81f9b02f
--- /dev/null
+++ b/tests/integration_tests/__init__.py
@@ -0,0 +1,14 @@
+import random
+
+
+def random_mac_address() -> str:
+ """Generate a random MAC address.
+
+    The first octet has its second-least-significant bit set, marking the
+    address as locally administered.
+ """
+ return "02:00:00:%02x:%02x:%02x" % (
+ random.randint(0, 255),
+ random.randint(0, 255),
+ random.randint(0, 255),
+ )
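A small sketch (not part of the diff, using the helper defined above) of how the locally administered bit of a generated address can be checked:

mac = random_mac_address()              # e.g. "02:00:00:3a:7f:c1"
first_octet = int(mac.split(":")[0], 16)
assert first_octet & 0b10, "locally administered (U/L) bit should be set"
assert not first_octet & 0b01, "address should be unicast (I/G bit clear)"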
diff --git a/tests/integration_tests/assets/keys/id_rsa.test1 b/tests/integration_tests/assets/keys/id_rsa.test1
new file mode 100644
index 00000000..bd4c822e
--- /dev/null
+++ b/tests/integration_tests/assets/keys/id_rsa.test1
@@ -0,0 +1,38 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn
+NhAAAAAwEAAQAAAYEAtRlG96aJ23URvAgO/bBsuLl+lquc350aSwV98/i8vlvOn5GVcHye
+t/rXQg4lZ4s0owG3kWyQFY8nvTk+G+UNU8fN0anAzBDi+4MzsejkF9scjTMFmXVrIpICqV
+3bYQNjPv6r+ubQdkD01du3eB9t5/zl84gtshp0hBdofyz8u1/A25s7fVU67GyI7PdKvaS+
+yvJSInZnb2e9VQzfJC+qAnN7gUZatBKjdgUtJeiUUeDaVnaS17b0aoT9iBO0sIcQtOTBlY
+lCjFt1TAMLZ64Hj3SfGZB7Yj0Z+LzFB2IWX1zzsjI68YkYPKOSL/NYhQU9e55kJQ7WnngN
+HY/2n/A7dNKSFDmgM5c9IWgeZ7fjpsfIYAoJ/CAxFIND+PEHd1gCS6xoEhaUVyh5WH/Xkw
+Kv1nx4AiZ2BFCE+75kySRLZUJ+5y0r3DU5ktMXeURzVIP7pu0R8DCul+GU+M/+THyWtAEO
+geaNJ6fYpo2ipDhbmTYt3kk2lMIapRxGBFs+37sdAAAFgGGJssNhibLDAAAAB3NzaC1yc2
+EAAAGBALUZRvemidt1EbwIDv2wbLi5fparnN+dGksFffP4vL5bzp+RlXB8nrf610IOJWeL
+NKMBt5FskBWPJ705PhvlDVPHzdGpwMwQ4vuDM7Ho5BfbHI0zBZl1ayKSAqld22EDYz7+q/
+rm0HZA9NXbt3gfbef85fOILbIadIQXaH8s/LtfwNubO31VOuxsiOz3Sr2kvsryUiJ2Z29n
+vVUM3yQvqgJze4FGWrQSo3YFLSXolFHg2lZ2kte29GqE/YgTtLCHELTkwZWJQoxbdUwDC2
+euB490nxmQe2I9Gfi8xQdiFl9c87IyOvGJGDyjki/zWIUFPXueZCUO1p54DR2P9p/wO3TS
+khQ5oDOXPSFoHme346bHyGAKCfwgMRSDQ/jxB3dYAkusaBIWlFcoeVh/15MCr9Z8eAImdg
+RQhPu+ZMkkS2VCfuctK9w1OZLTF3lEc1SD+6btEfAwrpfhlPjP/kx8lrQBDoHmjSen2KaN
+oqQ4W5k2Ld5JNpTCGqUcRgRbPt+7HQAAAAMBAAEAAAGBAJJCTOd70AC2ptEGbR0EHHqADT
+Wgefy7A94tHFEqxTy0JscGq/uCGimaY7kMdbcPXT59B4VieWeAC2cuUPP0ZHQSfS5ke7oT
+tU3N47U+0uBVbNS4rUAH7bOo2o9wptnOA5x/z+O+AARRZ6tEXQOd1oSy4gByLf2Wkh2QTi
+vP6Hln1vlFgKEzcXg6G8fN3MYWxKRhWmZM3DLERMvorlqqSBLcs5VvfZfLKcsKWTExioAq
+KgwEjYm8T9+rcpsw1xBus3j9k7wCI1Sus6PCDjq0pcYKLMYM7p8ygnU2tRYrOztdIxgWRA
+w/1oenm1Mqq2tV5xJcBCwCLOGe6SFwkIRywOYc57j5McH98Xhhg9cViyyBdXy/baF0mro+
+qPhOsWDxqwD4VKZ9UmQ6O8kPNKcc7QcIpFJhcO0g9zbp/MT0KueaWYrTKs8y4lUkTT7Xz6
++MzlR122/JwlAbBo6Y2kWtB+y+XwBZ0BfyJsm2czDhKm7OI5KfuBNhq0tFfKwOlYBq4QAA
+AMAyvUof1R8LLISkdO3EFTKn5RGNkPPoBJmGs6LwvU7NSjjLj/wPQe4jsIBc585tvbrddp
+60h72HgkZ5tqOfdeBYOKqX0qQQBHUEvI6M+NeQTQRev8bCHMLXQ21vzpClnrwNzlja359E
+uTRfiPRwIlyPLhOUiClBDSAnBI9h82Hkk3zzsQ/xGfsPB7iOjRbW69bMRSVCRpeweCVmWC
+77DTsEOq69V2TdljhQNIXE5OcOWonIlfgPiI74cdd+dLhzc/AAAADBAO1/JXd2kYiRyNkZ
+aXTLcwiSgBQIYbobqVP3OEtTclr0P1JAvby3Y4cCaEhkenx+fBqgXAku5lKM+U1Q9AEsMk
+cjIhaDpb43rU7GPjMn4zHwgGsEKd5pC1yIQ2PlK+cHanAdsDjIg+6RR+fuvid/mBeBOYXb
+Py0sa3HyekLJmCdx4UEyNASoiNaGFLQVAqo+RACsXy6VMxFH5dqDYlvwrfUQLwxJmse9Vb
+GEuuPAsklNugZqssC2XOIujFVUpslduQAAAMEAwzVHQVtsc3icCSzEAARpDTUdTbI29OhB
+/FMBnjzS9/3SWfLuBOSm9heNCHs2jdGNb8cPdKZuY7S9Fx6KuVUPyTbSSYkjj0F4fTeC9g
+0ym4p4UWYdF67WSWwLORkaG8K0d+G/CXkz8hvKUg6gcZWKBHAE1ROrHu1nsc8v7mkiKq4I
+bnTw5Q9TgjbWcQWtgPq0wXyyl/K8S1SFdkMCTOHDD0RQ+jTV2WNGVwFTodIRHenX+Rw2g4
+CHbTWbsFrHR1qFAAAACmphbWVzQG5ld3Q=
+-----END OPENSSH PRIVATE KEY-----
diff --git a/tests/integration_tests/assets/keys/id_rsa.test1.pub b/tests/integration_tests/assets/keys/id_rsa.test1.pub
new file mode 100644
index 00000000..3d2e26e1
--- /dev/null
+++ b/tests/integration_tests/assets/keys/id_rsa.test1.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC1GUb3ponbdRG8CA79sGy4uX6Wq5zfnRpLBX3z+Ly+W86fkZVwfJ63+tdCDiVnizSjAbeRbJAVjye9OT4b5Q1Tx83RqcDMEOL7gzOx6OQX2xyNMwWZdWsikgKpXdthA2M+/qv65tB2QPTV27d4H23n/OXziC2yGnSEF2h/LPy7X8Dbmzt9VTrsbIjs90q9pL7K8lIidmdvZ71VDN8kL6oCc3uBRlq0EqN2BS0l6JRR4NpWdpLXtvRqhP2IE7SwhxC05MGViUKMW3VMAwtnrgePdJ8ZkHtiPRn4vMUHYhZfXPOyMjrxiRg8o5Iv81iFBT17nmQlDtaeeA0dj/af8Dt00pIUOaAzlz0haB5nt+Omx8hgCgn8IDEUg0P48Qd3WAJLrGgSFpRXKHlYf9eTAq/WfHgCJnYEUIT7vmTJJEtlQn7nLSvcNTmS0xd5RHNUg/um7RHwMK6X4ZT4z/5MfJa0AQ6B5o0np9imjaKkOFuZNi3eSTaUwhqlHEYEWz7fux0= test1@host
diff --git a/tests/integration_tests/assets/keys/id_rsa.test2 b/tests/integration_tests/assets/keys/id_rsa.test2
new file mode 100644
index 00000000..5854d901
--- /dev/null
+++ b/tests/integration_tests/assets/keys/id_rsa.test2
@@ -0,0 +1,38 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn
+NhAAAAAwEAAQAAAYEAvK50D2PWOc4ikyHVRJS6tDhqzjL5cKiivID4p1X8BYCVw83XAEGO
+LnItUyVXHNADlh6fpVq1NY6A2JVtygoPF6ZFx8ph7IWMmnhDdnxLLyGsbhd1M1tiXJD/R+
+3WnGHRJ4PKrQavMLgqHRrieV3QVVfjFSeo6jX/4TruP6ZmvITMZWJrXaGphxJ/pPykEdkO
+i8AmKU9FNviojyPS2nNtj9B/635IdgWvrd7Vf5Ycsw9MR55LWSidwa856RH62Yl6LpEGTH
+m1lJiMk1u88JPSqvohhaUkLKkFpcQwcB0m76W1KOyllJsmX8bNXrlZsI+WiiYI7Xl5vQm2
+17DEuNeavtPAtDMxu8HmTg2UJ55Naxehbfe2lx2k5kYGGw3i1O1OVN2pZ2/OB71LucYd/5
+qxPaz03wswcGOJYGPkNc40vdES/Scc7Yt8HsnZuzqkyOgzn0HiUCzoYUYLYTpLf+yGmwxS
+yAEY056aOfkCsboKHOKiOmlJxNaZZFQkX1evep4DAAAFgC7HMbUuxzG1AAAAB3NzaC1yc2
+EAAAGBALyudA9j1jnOIpMh1USUurQ4as4y+XCooryA+KdV/AWAlcPN1wBBji5yLVMlVxzQ
+A5Yen6VatTWOgNiVbcoKDxemRcfKYeyFjJp4Q3Z8Sy8hrG4XdTNbYlyQ/0ft1pxh0SeDyq
+0GrzC4Kh0a4nld0FVX4xUnqOo1/+E67j+mZryEzGVia12hqYcSf6T8pBHZDovAJilPRTb4
+qI8j0tpzbY/Qf+t+SHYFr63e1X+WHLMPTEeeS1koncGvOekR+tmJei6RBkx5tZSYjJNbvP
+CT0qr6IYWlJCypBaXEMHAdJu+ltSjspZSbJl/GzV65WbCPloomCO15eb0JttewxLjXmr7T
+wLQzMbvB5k4NlCeeTWsXoW33tpcdpOZGBhsN4tTtTlTdqWdvzge9S7nGHf+asT2s9N8LMH
+BjiWBj5DXONL3REv0nHO2LfB7J2bs6pMjoM59B4lAs6GFGC2E6S3/shpsMUsgBGNOemjn5
+ArG6ChziojppScTWmWRUJF9Xr3qeAwAAAAMBAAEAAAGASj/kkEHbhbfmxzujL2/P4Sfqb+
+aDXqAeGkwujbs6h/fH99vC5ejmSMTJrVSeaUo6fxLiBDIj6UWA0rpLEBzRP59BCpRL4MXV
+RNxav/+9nniD4Hb+ug0WMhMlQmsH71ZW9lPYqCpfOq7ec8GmqdgPKeaCCEspH7HMVhfYtd
+eHylwAC02lrpz1l5/h900sS5G9NaWR3uPA+xbzThDs4uZVkSidjlCNt1QZhDSSk7jA5n34
+qJ5UTGu9WQDZqyxWKND+RIyQuFAPGQyoyCC1FayHO2sEhT5qHuumL14Mn81XpzoXFoKyql
+rhBDe+pHhKArBYt92Evch0k1ABKblFxtxLXcvk4Fs7pHi+8k4+Cnazej2kcsu1kURlMZJB
+w2QT/8BV4uImbH05LtyscQuwGzpIoxqrnHrvg5VbohStmhoOjYybzqqW3/M0qhkn5JgTiy
+dJcHRJisRnAcmbmEchYtLDi6RW1e022H4I9AFXQqyr5HylBq6ugtWcFCsrcX8ibZ8xAAAA
+wQCAOPgwae6yZLkrYzRfbxZtGKNmhpI0EtNSDCHYuQQapFZJe7EFENs/VAaIiiut0yajGj
+c3aoKcwGIoT8TUM8E3GSNW6+WidUOC7H6W+/6N2OYZHRBACGz820xO+UBCl2oSk+dLBlfr
+IQzBGUWn5uVYCs0/2nxfCdFyHtMK8dMF/ypbdG+o1rXz5y9b7PVG6Mn+o1Rjsdkq7VERmy
+Pukd8hwATOIJqoKl3TuFyBeYFLqe+0e7uTeswQFw17PF31VjAAAADBAOpJRQb8c6qWqsvv
+vkve0uMuL0DfWW0G6+SxjPLcV6aTWL5xu0Grd8uBxDkkHU/CDrAwpchXyuLsvbw21Eje/u
+U5k9nLEscWZwcX7odxlK+EfAY2Bf5+Hd9bH5HMzTRJH8KkWK1EppOLPyiDxz4LZGzPLVyv
+/1PgSuvXkSWk1KIE4SvSemyxGX2tPVI6uO+URqevfnPOS1tMB7BMQlgkR6eh4bugx9UYx9
+mwlXonNa4dN0iQxZ7N4rKFBbT/uyB2bQAAAMEAzisnkD8k9Tn8uyhxpWLHwb03X4ZUUHDV
+zu15e4a8dZ+mM8nHO986913Xz5JujlJKkGwFTvgWkIiR2zqTEauZHARH7gANpaweTm6lPd
+E4p2S0M3ulY7xtp9lCFIrDhMPPkGq8SFZB6qhgucHcZSRLq6ZDou3S2IdNOzDTpBtkhRCS
+0zFcdTLh3zZweoy8HGbW36bwB6s1CIL76Pd4F64i0Ms9CCCU6b+E5ArFhYQIsXiDbgHWbD
+tZRSm2GEgnDGAvAAAACmphbWVzQG5ld3Q=
+-----END OPENSSH PRIVATE KEY-----
diff --git a/tests/integration_tests/assets/keys/id_rsa.test2.pub b/tests/integration_tests/assets/keys/id_rsa.test2.pub
new file mode 100644
index 00000000..f3831a57
--- /dev/null
+++ b/tests/integration_tests/assets/keys/id_rsa.test2.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC8rnQPY9Y5ziKTIdVElLq0OGrOMvlwqKK8gPinVfwFgJXDzdcAQY4uci1TJVcc0AOWHp+lWrU1joDYlW3KCg8XpkXHymHshYyaeEN2fEsvIaxuF3UzW2JckP9H7dacYdEng8qtBq8wuCodGuJ5XdBVV+MVJ6jqNf/hOu4/pma8hMxlYmtdoamHEn+k/KQR2Q6LwCYpT0U2+KiPI9Lac22P0H/rfkh2Ba+t3tV/lhyzD0xHnktZKJ3BrznpEfrZiXoukQZMebWUmIyTW7zwk9Kq+iGFpSQsqQWlxDBwHSbvpbUo7KWUmyZfxs1euVmwj5aKJgjteXm9CbbXsMS415q+08C0MzG7weZODZQnnk1rF6Ft97aXHaTmRgYbDeLU7U5U3alnb84HvUu5xh3/mrE9rPTfCzBwY4lgY+Q1zjS90RL9Jxzti3weydm7OqTI6DOfQeJQLOhhRgthOkt/7IabDFLIARjTnpo5+QKxugoc4qI6aUnE1plkVCRfV696ngM= test2@host
diff --git a/tests/integration_tests/assets/keys/id_rsa.test3 b/tests/integration_tests/assets/keys/id_rsa.test3
new file mode 100644
index 00000000..2596c762
--- /dev/null
+++ b/tests/integration_tests/assets/keys/id_rsa.test3
@@ -0,0 +1,38 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn
+NhAAAAAwEAAQAAAYEApPG4MdkYQKD57/qreFrh9GRC22y66qZOWZWRjC887rrbvBzO69hV
+yJpTIXleJEvpWiHYcjMR5G6NNFsnNtZ4fxDqmSc4vcFj53JsE/XNqLKq6psXadCb5vkNpG
+bxA+Z5bJlzJ969PgJIIEbgc86sei4kgR2MuPWqtZbY5GkpNCTqWuLYeFK+14oFruA2nyWH
+9MOIRDHK/d597psHy+LTMtymO7ZPhO571abKw6jvvwiSeDxVE9kV7KAQIuM9/S3gftvgQQ
+ron3GL34pgmIabdSGdbfHqGDooryJhlbquJZELBN236KgRNTCAjVvUzjjQr1eRP3xssGwV
+O6ECBGCQLl/aYogAgtwnwj9iXqtfiLK3EwlgjquU4+JQ0CVtLhG3gIZB+qoMThco0pmHTr
+jtfQCwrztsBBFunSa2/CstuV1mQ5O5ZrZ6ACo9yPRBNkns6+CiKdtMtCtzi3k2RDz9jpYm
+Pcak03Lr7IkdC1Tp6+jA+//yPHSO1o4CqW89IQzNAAAFgEUd7lZFHe5WAAAAB3NzaC1yc2
+EAAAGBAKTxuDHZGECg+e/6q3ha4fRkQttsuuqmTlmVkYwvPO6627wczuvYVciaUyF5XiRL
+6Voh2HIzEeRujTRbJzbWeH8Q6pknOL3BY+dybBP1zaiyquqbF2nQm+b5DaRm8QPmeWyZcy
+fevT4CSCBG4HPOrHouJIEdjLj1qrWW2ORpKTQk6lri2HhSvteKBa7gNp8lh/TDiEQxyv3e
+fe6bB8vi0zLcpju2T4Tue9WmysOo778Ikng8VRPZFeygECLjPf0t4H7b4EEK6J9xi9+KYJ
+iGm3UhnW3x6hg6KK8iYZW6riWRCwTdt+ioETUwgI1b1M440K9XkT98bLBsFTuhAgRgkC5f
+2mKIAILcJ8I/Yl6rX4iytxMJYI6rlOPiUNAlbS4Rt4CGQfqqDE4XKNKZh0647X0AsK87bA
+QRbp0mtvwrLbldZkOTuWa2egAqPcj0QTZJ7OvgoinbTLQrc4t5NkQ8/Y6WJj3GpNNy6+yJ
+HQtU6evowPv/8jx0jtaOAqlvPSEMzQAAAAMBAAEAAAGAGaqbdPZJNdVWzyb8g6/wtSzc0n
+Qq6dSTIJGLonq/So69HpqFAGIbhymsger24UMGvsXBfpO/1wH06w68HWZmPa+OMeLOi4iK
+WTuO4dQ/+l5DBlq32/lgKSLcIpb6LhcxEdsW9j9Mx1dnjc45owun/yMq/wRwH1/q/nLIsV
+JD3R9ZcGcYNDD8DWIm3D17gmw+qbG7hJES+0oh4n0xS2KyZpm7LFOEMDVEA8z+hE/HbryQ
+vjD1NC91n+qQWD1wKfN3WZDRwip3z1I5VHMpvXrA/spHpa9gzHK5qXNmZSz3/dfA1zHjCR
+2dHjJnrIUH8nyPfw8t+COC+sQBL3Nr0KUWEFPRM08cOcQm4ctzg17aDIZBONjlZGKlReR8
+1zfAw84Q70q2spLWLBLXSFblHkaOfijEbejIbaz2UUEQT27WD7RHAORdQlkx7eitk66T9d
+DzIq/cpYhm5Fs8KZsh3PLldp9nsHbD2Oa9J9LJyI4ryuIW0mVwRdvPSiiYi3K+mDCpAAAA
+wBe+ugEEJ+V7orb1f4Zez0Bd4FNkEc52WZL4CWbaCtM+ZBg5KnQ6xW14JdC8IS9cNi/I5P
+yLsBvG4bWPLGgQruuKY6oLueD6BFnKjqF6ACUCiSQldh4BAW1nYc2U48+FFvo3ZQyudFSy
+QEFlhHmcaNMDo0AIJY5Xnq2BG3nEX7AqdtZ8hhenHwLCRQJatDwSYBHDpSDdh9vpTnGp/2
+0jBz25Ko4UANzvSAc3sA4yN3jfpoM366TgdNf8x3g1v7yljQAAAMEA0HSQjzH5nhEwB58k
+mYYxnBYp1wb86zIuVhAyjZaeinvBQSTmLow8sXIHcCVuD3CgBezlU2SX5d9YuvRU9rcthi
+uzn4wWnbnzYy4SwzkMJXchUAkumFVD8Hq5TNPh2Z+033rLLE08EhYypSeVpuzdpFoStaS9
+3DUZA2bR/zLZI9MOVZRUcYImNegqIjOYHY8Sbj3/0QPV6+WpUJFMPvvedWhfaOsRMTA6nr
+VLG4pxkrieVl0UtuRGbzD/exXhXVi7AAAAwQDKkJj4ez/+KZFYlZQKiV0BrfUFcgS6ElFM
+2CZIEagCtu8eedrwkNqx2FUX33uxdvUTr4c9I3NvWeEEGTB9pgD4lh1x/nxfuhyGXtimFM
+GnznGV9oyz0DmKlKiKSEGwWf5G+/NiiCwwVJ7wsQQm7TqNtkQ9b8MhWWXC7xlXKUs7dmTa
+e8AqAndCCMEnbS1UQFO/R5PNcZXkFWDggLQ/eWRYKlrXgdnUgH6h0saOcViKpNJBUXb3+x
+eauhOY52PS/BcAAAAKamFtZXNAbmV3dAE=
+-----END OPENSSH PRIVATE KEY-----
diff --git a/tests/integration_tests/assets/keys/id_rsa.test3.pub b/tests/integration_tests/assets/keys/id_rsa.test3.pub
new file mode 100644
index 00000000..057db632
--- /dev/null
+++ b/tests/integration_tests/assets/keys/id_rsa.test3.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCk8bgx2RhAoPnv+qt4WuH0ZELbbLrqpk5ZlZGMLzzuutu8HM7r2FXImlMheV4kS+laIdhyMxHkbo00Wyc21nh/EOqZJzi9wWPncmwT9c2osqrqmxdp0Jvm+Q2kZvED5nlsmXMn3r0+AkggRuBzzqx6LiSBHYy49aq1ltjkaSk0JOpa4th4Ur7XigWu4DafJYf0w4hEMcr93n3umwfL4tMy3KY7tk+E7nvVpsrDqO+/CJJ4PFUT2RXsoBAi4z39LeB+2+BBCuifcYvfimCYhpt1IZ1t8eoYOiivImGVuq4lkQsE3bfoqBE1MICNW9TOONCvV5E/fGywbBU7oQIEYJAuX9piiACC3CfCP2Jeq1+IsrcTCWCOq5Tj4lDQJW0uEbeAhkH6qgxOFyjSmYdOuO19ALCvO2wEEW6dJrb8Ky25XWZDk7lmtnoAKj3I9EE2Sezr4KIp20y0K3OLeTZEPP2OliY9xqTTcuvsiR0LVOnr6MD7//I8dI7WjgKpbz0hDM0= test3@host
diff --git a/tests/integration_tests/assets/test_version_change.pkl b/tests/integration_tests/assets/test_version_change.pkl
new file mode 100644
index 00000000..65ae93e5
--- /dev/null
+++ b/tests/integration_tests/assets/test_version_change.pkl
Binary files differ
diff --git a/tests/integration_tests/assets/trusty_with_mime.pkl b/tests/integration_tests/assets/trusty_with_mime.pkl
new file mode 100644
index 00000000..a4089ecf
--- /dev/null
+++ b/tests/integration_tests/assets/trusty_with_mime.pkl
@@ -0,0 +1,572 @@
+ccopy_reg
+_reconstructor
+p1
+(ccloudinit.sources.DataSourceNoCloud
+DataSourceNoCloudNet
+p2
+c__builtin__
+object
+p3
+NtRp4
+(dp5
+S'paths'
+p6
+g1
+(ccloudinit.helpers
+Paths
+p7
+g3
+NtRp8
+(dp9
+S'lookups'
+p10
+(dp11
+S'cloud_config'
+p12
+S'cloud-config.txt'
+p13
+sS'userdata'
+p14
+S'user-data.txt.i'
+p15
+sS'vendordata'
+p16
+S'vendor-data.txt.i'
+p17
+sS'userdata_raw'
+p18
+S'user-data.txt'
+p19
+sS'boothooks'
+p20
+g20
+sS'scripts'
+p21
+g21
+sS'sem'
+p22
+g22
+sS'data'
+p23
+g23
+sS'vendor_scripts'
+p24
+S'scripts/vendor'
+p25
+sS'handlers'
+p26
+g26
+sS'obj_pkl'
+p27
+S'obj.pkl'
+p28
+sS'vendordata_raw'
+p29
+S'vendor-data.txt'
+p30
+sS'vendor_cloud_config'
+p31
+S'vendor-cloud-config.txt'
+p32
+ssS'template_tpl'
+p33
+S'/etc/cloud/templates/%s.tmpl'
+p34
+sS'cfgs'
+p35
+(dp36
+S'cloud_dir'
+p37
+S'/var/lib/cloud/'
+p38
+sS'templates_dir'
+p39
+S'/etc/cloud/templates/'
+p40
+sS'upstart_dir'
+p41
+S'/etc/init/'
+p42
+ssS'cloud_dir'
+p43
+g38
+sS'datasource'
+p44
+NsS'upstart_conf_d'
+p45
+g42
+sS'boot_finished'
+p46
+S'/var/lib/cloud/instance/boot-finished'
+p47
+sS'instance_link'
+p48
+S'/var/lib/cloud/instance'
+p49
+sS'seed_dir'
+p50
+S'/var/lib/cloud/seed'
+p51
+sbsS'supported_seed_starts'
+p52
+(S'http://'
+p53
+S'https://'
+p54
+S'ftp://'
+p55
+tp56
+sS'sys_cfg'
+p57
+(dp58
+S'output'
+p59
+(dp60
+S'all'
+p61
+S'| tee -a /var/log/cloud-init-output.log'
+p62
+ssS'users'
+p63
+(lp64
+S'default'
+p65
+asS'def_log_file'
+p66
+S'/var/log/cloud-init.log'
+p67
+sS'cloud_final_modules'
+p68
+(lp69
+S'rightscale_userdata'
+p70
+aS'scripts-vendor'
+p71
+aS'scripts-per-once'
+p72
+aS'scripts-per-boot'
+p73
+aS'scripts-per-instance'
+p74
+aS'scripts-user'
+p75
+aS'ssh-authkey-fingerprints'
+p76
+aS'keys-to-console'
+p77
+aS'phone-home'
+p78
+aS'final-message'
+p79
+aS'power-state-change'
+p80
+asS'disable_root'
+p81
+I01
+sS'syslog_fix_perms'
+p82
+S'syslog:adm'
+p83
+sS'log_cfgs'
+p84
+(lp85
+(lp86
+S'[loggers]\nkeys=root,cloudinit\n\n[handlers]\nkeys=consoleHandler,cloudLogHandler\n\n[formatters]\nkeys=simpleFormatter,arg0Formatter\n\n[logger_root]\nlevel=DEBUG\nhandlers=consoleHandler,cloudLogHandler\n\n[logger_cloudinit]\nlevel=DEBUG\nqualname=cloudinit\nhandlers=\npropagate=1\n\n[handler_consoleHandler]\nclass=StreamHandler\nlevel=WARNING\nformatter=arg0Formatter\nargs=(sys.stderr,)\n\n[formatter_arg0Formatter]\nformat=%(asctime)s - %(filename)s[%(levelname)s]: %(message)s\n\n[formatter_simpleFormatter]\nformat=[CLOUDINIT] %(filename)s[%(levelname)s]: %(message)s\n'
+p87
+aS'[handler_cloudLogHandler]\nclass=handlers.SysLogHandler\nlevel=DEBUG\nformatter=simpleFormatter\nargs=("/dev/log", handlers.SysLogHandler.LOG_USER)\n'
+p88
+aa(lp89
+g87
+aS"[handler_cloudLogHandler]\nclass=FileHandler\nlevel=DEBUG\nformatter=arg0Formatter\nargs=('/var/log/cloud-init.log',)\n"
+p90
+aasS'cloud_init_modules'
+p91
+(lp92
+S'migrator'
+p93
+aS'seed_random'
+p94
+aS'bootcmd'
+p95
+aS'write-files'
+p96
+aS'growpart'
+p97
+aS'resizefs'
+p98
+aS'set_hostname'
+p99
+aS'update_hostname'
+p100
+aS'update_etc_hosts'
+p101
+aS'ca-certs'
+p102
+aS'rsyslog'
+p103
+aS'users-groups'
+p104
+aS'ssh'
+p105
+asS'preserve_hostname'
+p106
+I00
+sS'_log'
+p107
+(lp108
+g87
+ag90
+ag88
+asS'datasource_list'
+p109
+(lp110
+S'NoCloud'
+p111
+aS'ConfigDrive'
+p112
+aS'OpenNebula'
+p113
+aS'Azure'
+p114
+aS'AltCloud'
+p115
+aS'OVF'
+p116
+aS'MAAS'
+p117
+aS'GCE'
+p118
+aS'OpenStack'
+p119
+aS'CloudSigma'
+p120
+aS'Ec2'
+p121
+aS'CloudStack'
+p122
+aS'SmartOS'
+p123
+aS'None'
+p124
+asS'vendor_data'
+p125
+(dp126
+S'prefix'
+p127
+(lp128
+sS'enabled'
+p129
+I01
+ssS'cloud_config_modules'
+p130
+(lp131
+S'emit_upstart'
+p132
+aS'disk_setup'
+p133
+aS'mounts'
+p134
+aS'ssh-import-id'
+p135
+aS'locale'
+p136
+aS'set-passwords'
+p137
+aS'grub-dpkg'
+p138
+aS'apt-pipelining'
+p139
+aS'apt-configure'
+p140
+aS'package-update-upgrade-install'
+p141
+aS'landscape'
+p142
+aS'timezone'
+p143
+aS'puppet'
+p144
+aS'chef'
+p145
+aS'salt-minion'
+p146
+aS'mcollective'
+p147
+aS'disable-ec2-metadata'
+p148
+aS'runcmd'
+p149
+aS'byobu'
+p150
+assg14
+(iemail.mime.multipart
+MIMEMultipart
+p151
+(dp152
+S'_headers'
+p153
+(lp154
+(S'Content-Type'
+p155
+S'multipart/mixed; boundary="===============4291038100093149247=="'
+tp156
+a(S'MIME-Version'
+p157
+S'1.0'
+p158
+tp159
+a(S'Number-Attachments'
+p160
+S'1'
+tp161
+asS'_payload'
+p162
+(lp163
+(iemail.mime.base
+MIMEBase
+p164
+(dp165
+g153
+(lp166
+(g157
+g158
+tp167
+a(S'Content-Type'
+p168
+S'text/x-not-multipart'
+tp169
+a(S'Content-Disposition'
+p170
+S'attachment; filename="part-001"'
+tp171
+asg162
+S''
+sS'_charset'
+p172
+NsS'_default_type'
+p173
+S'text/plain'
+p174
+sS'preamble'
+p175
+NsS'defects'
+p176
+(lp177
+sS'_unixfrom'
+p178
+NsS'epilogue'
+p179
+Nsbasg172
+Nsg173
+g174
+sg175
+Nsg176
+(lp180
+sg178
+Nsg179
+Nsbsg16
+S'#cloud-config\n{}\n\n'
+p181
+sg18
+S'Content-Type: multipart/mixed; boundary="===============1378281702283945349=="\nMIME-Version: 1.0\n\n--===============1378281702283945349==\nContent-Type: text/x-shellscript; charset="utf-8"\nMIME-Version: 1.0\nContent-Transfer-Encoding: base64\nContent-Disposition: attachment; filename="script1.sh"\n\nIyEvYmluL3NoCgplY2hvICdoaScgPiAvdmFyL3RtcC9oaQo=\n\n--===============1378281702283945349==\nContent-Type: text/x-shellscript; charset="utf-8"\nMIME-Version: 1.0\nContent-Transfer-Encoding: base64\nContent-Disposition: attachment; filename="script2.sh"\n\nIyEvYmluL2Jhc2gKCmVjaG8gJ2hpMicgPiAvdmFyL3RtcC9oaTIK\n\n--===============1378281702283945349==--\n\n#cloud-config\n# final_message: |\n# This is my final message!\n# $version\n# $timestamp\n# $datasource\n# $uptime\n# updates:\n# network:\n# when: [\'hotplug\']\n'
+p182
+sg29
+NsS'dsmode'
+p183
+S'net'
+p184
+sS'seed'
+p185
+S'/var/lib/cloud/seed/nocloud-net'
+p186
+sS'cmdline_id'
+p187
+S'ds=nocloud-net'
+p188
+sS'ud_proc'
+p189
+g1
+(ccloudinit.user_data
+UserDataProcessor
+p190
+g3
+NtRp191
+(dp192
+g6
+g8
+sS'ssl_details'
+p193
+(dp194
+sbsg50
+g186
+sS'ds_cfg'
+p195
+(dp196
+sS'distro'
+p197
+g1
+(ccloudinit.distros.ubuntu
+Distro
+p198
+g3
+NtRp199
+(dp200
+S'osfamily'
+p201
+S'debian'
+p202
+sS'_paths'
+p203
+g8
+sS'name'
+p204
+S'ubuntu'
+p205
+sS'_runner'
+p206
+g1
+(ccloudinit.helpers
+Runners
+p207
+g3
+NtRp208
+(dp209
+g6
+g8
+sS'sems'
+p210
+(dp211
+sbsS'_cfg'
+p212
+(dp213
+S'paths'
+p214
+(dp215
+g37
+g38
+sg39
+g40
+sg41
+g42
+ssS'default_user'
+p216
+(dp217
+S'shell'
+p218
+S'/bin/bash'
+p219
+sS'name'
+p220
+S'ubuntu'
+p221
+sS'sudo'
+p222
+(lp223
+S'ALL=(ALL) NOPASSWD:ALL'
+p224
+asS'lock_passwd'
+p225
+I01
+sS'gecos'
+p226
+S'Ubuntu'
+p227
+sS'groups'
+p228
+(lp229
+S'adm'
+p230
+aS'audio'
+p231
+aS'cdrom'
+p232
+aS'dialout'
+p233
+aS'dip'
+p234
+aS'floppy'
+p235
+aS'netdev'
+p236
+aS'plugdev'
+p237
+aS'sudo'
+p238
+aS'video'
+p239
+assS'package_mirrors'
+p240
+(lp241
+(dp242
+S'arches'
+p243
+(lp244
+S'i386'
+p245
+aS'amd64'
+p246
+asS'failsafe'
+p247
+(dp248
+S'security'
+p249
+S'http://security.ubuntu.com/ubuntu'
+p250
+sS'primary'
+p251
+S'http://archive.ubuntu.com/ubuntu'
+p252
+ssS'search'
+p253
+(dp254
+S'security'
+p255
+(lp256
+sS'primary'
+p257
+(lp258
+S'http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/'
+p259
+aS'http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/'
+p260
+aS'http://%(region)s.clouds.archive.ubuntu.com/ubuntu/'
+p261
+assa(dp262
+S'arches'
+p263
+(lp264
+S'armhf'
+p265
+aS'armel'
+p266
+aS'default'
+p267
+asS'failsafe'
+p268
+(dp269
+S'security'
+p270
+S'http://ports.ubuntu.com/ubuntu-ports'
+p271
+sS'primary'
+p272
+S'http://ports.ubuntu.com/ubuntu-ports'
+p273
+ssasS'ssh_svcname'
+p274
+S'ssh'
+p275
+ssbsS'metadata'
+p276
+(dp277
+g183
+g184
+sS'local-hostname'
+p278
+S'me'
+p279
+sS'instance-id'
+p280
+S'me'
+p281
+ssb. \ No newline at end of file
diff --git a/tests/integration_tests/bugs/test_gh570.py b/tests/integration_tests/bugs/test_gh570.py
new file mode 100644
index 00000000..e98ab5d0
--- /dev/null
+++ b/tests/integration_tests/bugs/test_gh570.py
@@ -0,0 +1,39 @@
+"""Integration test for #570.
+
+Test that we can provide optional vendor-data via the seedfrom location in a
+NoCloud environment.
+"""
+
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+
+VENDOR_DATA = """\
+#cloud-config
+runcmd:
+ - touch /var/tmp/seeded_vendordata_test_file
+"""
+
+
+# Only running on LXD because we need NoCloud for this test
+@pytest.mark.lxd_container
+@pytest.mark.lxd_vm
+def test_nocloud_seedfrom_vendordata(client: IntegrationInstance):
+ seed_dir = "/var/tmp/test_seed_dir"
+ result = client.execute(
+ "mkdir {seed_dir} && "
+ "touch {seed_dir}/user-data && "
+ "touch {seed_dir}/meta-data && "
+ "echo 'seedfrom: {seed_dir}/' > "
+ "/var/lib/cloud/seed/nocloud-net/meta-data".format(seed_dir=seed_dir)
+ )
+ assert result.return_code == 0
+
+ client.write_to_file(
+ "{}/vendor-data".format(seed_dir),
+ VENDOR_DATA,
+ )
+ client.execute("cloud-init clean --logs")
+ client.restart()
+ assert client.execute("cloud-init status").ok
+ assert "seeded_vendordata_test_file" in client.execute("ls /var/tmp")
diff --git a/tests/integration_tests/bugs/test_gh626.py b/tests/integration_tests/bugs/test_gh626.py
new file mode 100644
index 00000000..b80b677a
--- /dev/null
+++ b/tests/integration_tests/bugs/test_gh626.py
@@ -0,0 +1,43 @@
+"""Integration test for gh-626.
+
+Ensure that if wakeonlan is specified in the network config, it is rendered
+in /etc/network/interfaces or the netplan config.
+"""
+
+import pytest
+import yaml
+
+from tests.integration_tests import random_mac_address
+from tests.integration_tests.instances import IntegrationInstance
+
+MAC_ADDRESS = random_mac_address()
+NETWORK_CONFIG = """\
+version: 2
+ethernets:
+ eth0:
+ dhcp4: true
+ wakeonlan: true
+ match:
+ macaddress: {}
+""".format(
+ MAC_ADDRESS
+)
+
+EXPECTED_ENI_END = """\
+iface eth0 inet dhcp
+ ethernet-wol g"""
+
+
+@pytest.mark.lxd_container
+@pytest.mark.lxd_vm
+@pytest.mark.lxd_config_dict(
+ {
+ "user.network-config": NETWORK_CONFIG,
+ "volatile.eth0.hwaddr": MAC_ADDRESS,
+ }
+)
+def test_wakeonlan(client: IntegrationInstance):
+ netplan_cfg = client.execute("cat /etc/netplan/50-cloud-init.yaml")
+ netplan_yaml = yaml.safe_load(netplan_cfg)
+ assert "wakeonlan" in netplan_yaml["network"]["ethernets"]["eth0"]
+ assert netplan_yaml["network"]["ethernets"]["eth0"]["wakeonlan"] is True
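For illustration (hypothetical rendered content, not the file cloud-init actually writes), the netplan structure the assertions above expect looks roughly like this:

import yaml

rendered = yaml.safe_load("""
network:
  version: 2
  ethernets:
    eth0:
      dhcp4: true
      wakeonlan: true
""")
assert rendered["network"]["ethernets"]["eth0"]["wakeonlan"] is True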
diff --git a/tests/integration_tests/bugs/test_gh632.py b/tests/integration_tests/bugs/test_gh632.py
new file mode 100644
index 00000000..c7a897c6
--- /dev/null
+++ b/tests/integration_tests/bugs/test_gh632.py
@@ -0,0 +1,33 @@
+"""Integration test for gh-632.
+
+Verify that if cloud-init is using DataSourceRbxCloud, there is
+no traceback if the metadata disk cannot be found.
+"""
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import verify_clean_log
+
+
+# With some datasource hacking, we can run this on a NoCloud instance
+@pytest.mark.lxd_container
+@pytest.mark.lxd_vm
+def test_datasource_rbx_no_stacktrace(client: IntegrationInstance):
+ client.write_to_file(
+ "/etc/cloud/cloud.cfg.d/90_dpkg.cfg",
+ "datasource_list: [ RbxCloud, NoCloud ]\n",
+ )
+ client.write_to_file(
+ "/etc/cloud/ds-identify.cfg",
+ "policy: enabled\n",
+ )
+ client.execute("cloud-init clean --logs")
+ client.restart()
+
+ log = client.read_from_file("/var/log/cloud-init.log")
+ verify_clean_log(log)
+ assert "Failed to load metadata and userdata" not in log
+ assert (
+ "Getting data from <class 'cloudinit.sources.DataSourceRbxCloud."
+ "DataSourceRbxCloud'> failed" not in log
+ )
diff --git a/tests/integration_tests/bugs/test_gh668.py b/tests/integration_tests/bugs/test_gh668.py
new file mode 100644
index 00000000..95edb48d
--- /dev/null
+++ b/tests/integration_tests/bugs/test_gh668.py
@@ -0,0 +1,46 @@
+"""Integration test for gh-668.
+
+Ensure that a static route to a host works correctly.
+The original problem is specific to the ENI renderer, but this test is
+suitable for all network configuration outputs.
+"""
+
+import pytest
+
+from tests.integration_tests import random_mac_address
+from tests.integration_tests.instances import IntegrationInstance
+
+DESTINATION_IP = "172.16.0.10"
+GATEWAY_IP = "10.0.0.100"
+MAC_ADDRESS = random_mac_address()
+
+NETWORK_CONFIG = """\
+version: 2
+ethernets:
+ eth0:
+ addresses: [10.0.0.10/8]
+ dhcp4: false
+ routes:
+ - to: {}/32
+ via: {}
+ match:
+ macaddress: {}
+""".format(
+ DESTINATION_IP, GATEWAY_IP, MAC_ADDRESS
+)
+
+EXPECTED_ROUTE = "{} via {}".format(DESTINATION_IP, GATEWAY_IP)
+
+
+@pytest.mark.lxd_container
+@pytest.mark.lxd_vm
+@pytest.mark.lxd_config_dict(
+ {
+ "user.network-config": NETWORK_CONFIG,
+ "volatile.eth0.hwaddr": MAC_ADDRESS,
+ }
+)
+@pytest.mark.lxd_use_exec
+def test_static_route_to_host(client: IntegrationInstance):
+ route = client.execute("ip route | grep {}".format(DESTINATION_IP))
+ assert route.startswith(EXPECTED_ROUTE)
diff --git a/tests/integration_tests/bugs/test_gh671.py b/tests/integration_tests/bugs/test_gh671.py
new file mode 100644
index 00000000..2d7c8118
--- /dev/null
+++ b/tests/integration_tests/bugs/test_gh671.py
@@ -0,0 +1,53 @@
+"""Integration test for gh-671.
+
+Verify that, on Azure, if a default user and password are specified
+through the Azure API, a change in the default password overwrites
+the old password.
+"""
+
+import crypt
+
+import pytest
+
+from tests.integration_tests.clouds import IntegrationCloud
+
+OLD_PASSWORD = "DoIM33tTheComplexityRequirements!??"
+NEW_PASSWORD = "DoIM33tTheComplexityRequirementsNow!??"
+
+
+def _check_password(instance, unhashed_password):
+ shadow_password = instance.execute("getent shadow ubuntu").split(":")[1]
+ salt = shadow_password.rsplit("$", 1)[0]
+ hashed_password = crypt.crypt(unhashed_password, salt)
+ assert shadow_password == hashed_password
+
+
+@pytest.mark.azure
+def test_update_default_password(setup_image, session_cloud: IntegrationCloud):
+ os_profile = {
+ "os_profile": {
+ "admin_password": "",
+ "linux_configuration": {"disable_password_authentication": False},
+ }
+ }
+ os_profile["os_profile"]["admin_password"] = OLD_PASSWORD
+ instance1 = session_cloud.launch(launch_kwargs={"vm_params": os_profile})
+
+ _check_password(instance1, OLD_PASSWORD)
+
+ snapshot_id = instance1.cloud.cloud_instance.snapshot(
+ instance1.instance, delete_provisioned_user=False
+ )
+
+ os_profile["os_profile"]["admin_password"] = NEW_PASSWORD
+ try:
+ with session_cloud.launch(
+ launch_kwargs={
+ "image_id": snapshot_id,
+ "vm_params": os_profile,
+ }
+ ) as instance2:
+ _check_password(instance2, NEW_PASSWORD)
+ finally:
+ session_cloud.cloud_instance.delete_image(snapshot_id)
+ instance1.destroy()
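A standalone sketch of the crypt-based check performed by _check_password above (note the stdlib crypt module is Unix-only and deprecated in recent Python releases): hashing the candidate password with the method-and-salt portion of the shadow entry must reproduce the stored hash.

import crypt

password = "DoIM33tTheComplexityRequirements!??"
shadow_hash = crypt.crypt(password, crypt.mksalt(crypt.METHOD_SHA512))
salt = shadow_hash.rsplit("$", 1)[0]   # keep "$6$<salt>", drop the hash part
assert crypt.crypt(password, salt) == shadow_hash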
diff --git a/tests/integration_tests/bugs/test_gh868.py b/tests/integration_tests/bugs/test_gh868.py
new file mode 100644
index 00000000..a62e8b36
--- /dev/null
+++ b/tests/integration_tests/bugs/test_gh868.py
@@ -0,0 +1,27 @@
+"""Ensure no Traceback when 'chef_license' is set"""
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import verify_clean_log
+
+USERDATA = """\
+#cloud-config
+chef:
+ install_type: omnibus
+ chef_license: accept
+ server_url: https://chef.yourorg.invalid
+ validation_name: some-validator
+"""
+
+
+@pytest.mark.adhoc # Can't be regularly reaching out to chef install script
+@pytest.mark.ec2
+@pytest.mark.gce
+@pytest.mark.azure
+@pytest.mark.oci
+@pytest.mark.lxd_container
+@pytest.mark.lxd_vm
+@pytest.mark.user_data(USERDATA)
+def test_chef_license(client: IntegrationInstance):
+ log = client.read_from_file("/var/log/cloud-init.log")
+ verify_clean_log(log)
diff --git a/tests/integration_tests/bugs/test_lp1813396.py b/tests/integration_tests/bugs/test_lp1813396.py
new file mode 100644
index 00000000..ddae02f5
--- /dev/null
+++ b/tests/integration_tests/bugs/test_lp1813396.py
@@ -0,0 +1,31 @@
+"""Integration test for lp-1813396
+
+Ensure gpg is called with no tty flag.
+"""
+
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import verify_ordered_items_in_text
+
+USER_DATA = """\
+#cloud-config
+apt:
+ sources:
+ cloudinit:
+ source: 'deb [arch=amd64] http://ppa.launchpad.net/cloud-init-dev/daily/ubuntu focal main'
+ keyserver: keyserver.ubuntu.com
+ keyid: E4D304DF
+""" # noqa: E501
+
+
+@pytest.mark.user_data(USER_DATA)
+def test_gpg_no_tty(client: IntegrationInstance):
+ log = client.read_from_file("/var/log/cloud-init.log")
+ to_verify = [
+ "Running command ['gpg', '--no-tty', "
+ "'--keyserver=keyserver.ubuntu.com', '--recv-keys', 'E4D304DF'] "
+ "with allowed return codes [0] (shell=False, capture=True)",
+ "Imported key 'E4D304DF' from keyserver 'keyserver.ubuntu.com'",
+ ]
+ verify_ordered_items_in_text(to_verify, log)
diff --git a/tests/integration_tests/bugs/test_lp1835584.py b/tests/integration_tests/bugs/test_lp1835584.py
new file mode 100644
index 00000000..765d73ef
--- /dev/null
+++ b/tests/integration_tests/bugs/test_lp1835584.py
@@ -0,0 +1,101 @@
+""" Integration test for LP #1835584
+
+Upstream linux kernels prior to 4.15 provide DMI product_uuid in uppercase.
+More recent kernels switched to lowercase for DMI product_uuid. Azure
+datasource uses this product_uuid as the instance-id for cloud-init.
+
+With the linux-azure-fips kernel installed in PRO FIPS images, the product
+UUID is uppercase, whereas the linux-azure cloud-optimized kernel reports the
+UUID as lowercase.
+
+In cases where product_uuid changes case, ensure cloud-init doesn't
+recreate ssh hostkeys across reboot (due to detecting an instance_id change).
+
+This currently only affects linux-azure-fips -> linux-azure on Bionic.
+This test won't run on Xenial because both linux-azure-fips and linux-azure
+report uppercase product_uuids.
+
+The test will launch a specific Bionic Ubuntu PRO FIPS image which has a
+linux-azure-fips kernel known to report product_uuid as uppercase. Then upgrade
+and reboot into linux-azure kernel which is known to report product_uuid as
+lowercase.
+
+Across the reboot, assert that we didn't re-run config_ssh by virtue of
+seeing only one semaphore creation log entry of type:
+
+ Writing to /var/lib/cloud/instances/<UUID>/sem/config_ssh -
+
+https://bugs.launchpad.net/cloud-init/+bug/1835584
+"""
+import re
+
+import pytest
+
+from tests.integration_tests.clouds import ImageSpecification, IntegrationCloud
+from tests.integration_tests.conftest import get_validated_source
+from tests.integration_tests.instances import IntegrationInstance
+
+IMG_AZURE_UBUNTU_PRO_FIPS_BIONIC = (
+ "Canonical:0001-com-ubuntu-pro-bionic-fips:pro-fips-18_04:18.04.202010201"
+)
+
+
+def _check_iid_insensitive_across_kernel_upgrade(
+ instance: IntegrationInstance,
+):
+ uuid = instance.read_from_file("/sys/class/dmi/id/product_uuid")
+ assert (
+ uuid.isupper()
+ ), "Expected uppercase UUID on Ubuntu FIPS image {}".format(uuid)
+ orig_kernel = instance.execute("uname -r").strip()
+ assert "azure-fips" in orig_kernel
+ result = instance.execute("apt-get update")
+ # Install a 5.4+ kernel which provides lowercase product_uuid
+ result = instance.execute("apt-get install linux-azure --assume-yes")
+ if not result.ok:
+ pytest.fail("Unable to install linux-azure kernel: {}".format(result))
+ # Remove ubuntu-azure-fips metapkg which mandates FIPS-flavour kernel
+ result = instance.execute("ua disable fips --assume-yes")
+ assert result.ok, "Unable to disable fips: {}".format(result)
+ instance.restart()
+ new_kernel = instance.execute("uname -r").strip()
+ assert orig_kernel != new_kernel
+ assert "azure-fips" not in new_kernel
+ assert "azure" in new_kernel
+ new_uuid = instance.read_from_file("/sys/class/dmi/id/product_uuid")
+ assert (
+ uuid.lower() == new_uuid
+ ), "Expected UUID on linux-azure to be lowercase of FIPS: {}".format(uuid)
+ log = instance.read_from_file("/var/log/cloud-init.log")
+ RE_CONFIG_SSH_SEMAPHORE = r"Writing.*sem/config_ssh "
+ ssh_runs = len(re.findall(RE_CONFIG_SSH_SEMAPHORE, log))
+ assert 1 == ssh_runs, "config_ssh ran too many times {}".format(ssh_runs)
+
+
+@pytest.mark.azure
+def test_azure_kernel_upgrade_case_insensitive_uuid(
+ session_cloud: IntegrationCloud,
+):
+ cfg_image_spec = ImageSpecification.from_os_image()
+ if (cfg_image_spec.os, cfg_image_spec.release) != ("ubuntu", "bionic"):
+ pytest.skip(
+ "Test only supports ubuntu:bionic not {0.os}:{0.release}".format(
+ cfg_image_spec
+ )
+ )
+ source = get_validated_source(session_cloud)
+ if not source.installs_new_version():
+ pytest.skip(
+ "Provide CLOUD_INIT_SOURCE to install expected working cloud-init"
+ )
+ image_id = IMG_AZURE_UBUNTU_PRO_FIPS_BIONIC
+ with session_cloud.launch(
+ launch_kwargs={"image_id": image_id}
+ ) as instance:
+ # We can't use setup_image fixture here because we want to avoid
+ # taking a snapshot or cleaning the booted machine after cloud-init
+ # upgrade.
+ instance.install_new_cloud_init(
+ source, take_snapshot=False, clean=False
+ )
+ _check_iid_insensitive_across_kernel_upgrade(instance)
diff --git a/tests/integration_tests/bugs/test_lp1886531.py b/tests/integration_tests/bugs/test_lp1886531.py
index 058ea8bb..d56ca320 100644
--- a/tests/integration_tests/bugs/test_lp1886531.py
+++ b/tests/integration_tests/bugs/test_lp1886531.py
@@ -11,6 +11,7 @@ https://bugs.launchpad.net/ubuntu/+source/cloud-init/+bug/1886531
"""
import pytest
+from tests.integration_tests.util import verify_clean_log
USER_DATA = """\
#cloud-config
@@ -20,8 +21,7 @@ bootcmd:
class TestLp1886531:
-
@pytest.mark.user_data(USER_DATA)
def test_lp1886531(self, client):
log_content = client.read_from_file("/var/log/cloud-init.log")
- assert "WARNING" not in log_content
+ verify_clean_log(log_content)
diff --git a/tests/integration_tests/bugs/test_lp1897099.py b/tests/integration_tests/bugs/test_lp1897099.py
index 27c8927f..1f5030ce 100644
--- a/tests/integration_tests/bugs/test_lp1897099.py
+++ b/tests/integration_tests/bugs/test_lp1897099.py
@@ -7,7 +7,6 @@ https://bugs.launchpad.net/cloud-init/+bug/1897099
import pytest
-
USER_DATA = """\
#cloud-config
bootcmd:
@@ -19,13 +18,12 @@ swap:
"""
-@pytest.mark.sru_2020_11
@pytest.mark.user_data(USER_DATA)
-@pytest.mark.no_container('Containers cannot configure swap')
+@pytest.mark.no_container("Containers cannot configure swap")
def test_fallocate_fallback(client):
- log = client.read_from_file('/var/log/cloud-init.log')
- assert '/swap.img' in client.execute('cat /proc/swaps')
- assert '/swap.img' in client.execute('cat /etc/fstab')
- assert 'fallocate swap creation failed, will attempt with dd' in log
+ log = client.read_from_file("/var/log/cloud-init.log")
+ assert "/swap.img" in client.execute("cat /proc/swaps")
+ assert "/swap.img" in client.execute("cat /etc/fstab")
+ assert "fallocate swap creation failed, will attempt with dd" in log
assert "Running command ['dd', 'if=/dev/zero', 'of=/swap.img'" in log
- assert 'SUCCESS: config-mounts ran successfully' in log
+ assert "SUCCESS: config-mounts ran successfully" in log
diff --git a/tests/integration_tests/bugs/test_lp1898997.py b/tests/integration_tests/bugs/test_lp1898997.py
new file mode 100644
index 00000000..d8ea54c3
--- /dev/null
+++ b/tests/integration_tests/bugs/test_lp1898997.py
@@ -0,0 +1,77 @@
+"""Integration test for LP: #1898997
+
+cloud-init was incorrectly excluding Open vSwitch bridge members from its list
+of interfaces. This meant that instances which had only one interface which
+was in an Open vSwitch bridge would not boot correctly: cloud-init would not
+find the expected physical interfaces, so would not apply network config.
+
+This test checks that cloud-init believes it has successfully applied the
+network configuration, and confirms that the bridge can be used to ping the
+default gateway.
+"""
+import pytest
+
+from tests.integration_tests import random_mac_address
+from tests.integration_tests.util import verify_clean_log
+
+MAC_ADDRESS = random_mac_address()
+
+
+NETWORK_CONFIG = """\
+bridges:
+ ovs-br:
+ dhcp4: true
+ interfaces:
+ - enp5s0
+ macaddress: 52:54:00:d9:08:1c
+ mtu: 1500
+ openvswitch: {{}}
+ethernets:
+ enp5s0:
+ mtu: 1500
+ set-name: enp5s0
+ match:
+ macaddress: {}
+version: 2
+""".format(
+ MAC_ADDRESS
+)
+
+
+@pytest.mark.lxd_config_dict(
+ {
+ "user.network-config": NETWORK_CONFIG,
+ "volatile.eth0.hwaddr": MAC_ADDRESS,
+ }
+)
+@pytest.mark.lxd_vm
+@pytest.mark.lxd_use_exec
+@pytest.mark.not_bionic
+@pytest.mark.ubuntu
+class TestInterfaceListingWithOpenvSwitch:
+ def test_ovs_member_interfaces_not_excluded(self, client):
+        # We need to install openvswitch for our provided network config to
+        # apply (on next boot), so run DHCP on the default interface to fetch it
+ client.execute("dhclient enp5s0")
+ client.execute("apt update -qqy")
+ client.execute("apt-get install -qqy openvswitch-switch")
+
+ # Now our networking config should successfully apply on a clean reboot
+ client.execute("cloud-init clean --logs")
+ client.restart()
+
+ cloudinit_output = client.read_from_file("/var/log/cloud-init.log")
+
+ # Confirm that the network configuration was applied successfully
+ verify_clean_log(cloudinit_output)
+ # Confirm that the applied network config created the OVS bridge
+ assert "ovs-br" in client.execute("ip addr")
+
+ # Test that we can ping our gateway using our bridge
+ gateway = client.execute(
+ "ip -4 route show default | awk '{ print $3 }'"
+ )
+ ping_result = client.execute(
+ "ping -c 1 -W 1 -I ovs-br {}".format(gateway)
+ )
+ assert ping_result.ok
diff --git a/tests/integration_tests/bugs/test_lp1900837.py b/tests/integration_tests/bugs/test_lp1900837.py
index 3fe7d0d0..d9ef18aa 100644
--- a/tests/integration_tests/bugs/test_lp1900837.py
+++ b/tests/integration_tests/bugs/test_lp1900837.py
@@ -4,14 +4,12 @@ This test mirrors the reproducing steps from the reported bug: it changes the
permissions on cloud-init.log to 600 and confirms that they remain 600 after a
reboot.
"""
-import pytest
def _get_log_perms(client):
return client.execute("stat -c %a /var/log/cloud-init.log")
-@pytest.mark.sru_2020_11
class TestLogPermissionsNotResetOnReboot:
def test_permissions_unchanged(self, client):
# Confirm that the current permissions aren't 600
@@ -22,7 +20,8 @@ class TestLogPermissionsNotResetOnReboot:
assert "600" == _get_log_perms(client)
# Reboot
- client.instance.restart()
+ client.restart()
+ assert client.execute("cloud-init status").ok
# Check that permissions are not reset on reboot
assert "600" == _get_log_perms(client)
diff --git a/tests/integration_tests/bugs/test_lp1901011.py b/tests/integration_tests/bugs/test_lp1901011.py
new file mode 100644
index 00000000..7de8bd77
--- /dev/null
+++ b/tests/integration_tests/bugs/test_lp1901011.py
@@ -0,0 +1,67 @@
+"""Integration test for LP: #1901011
+
+Ensure an ephemeral disk exists after boot.
+
+See https://github.com/canonical/cloud-init/pull/800
+"""
+import pytest
+
+from tests.integration_tests.clouds import IntegrationCloud
+
+
+@pytest.mark.azure
+@pytest.mark.parametrize(
+ "instance_type,is_ephemeral",
+ [
+ ("Standard_DS1_v2", True),
+ ("Standard_D2s_v4", False),
+ ],
+)
+def test_ephemeral(
+ instance_type, is_ephemeral, session_cloud: IntegrationCloud, setup_image
+):
+ if is_ephemeral:
+ expected_log = (
+ "Ephemeral resource disk '/dev/disk/cloud/azure_resource' exists. "
+ "Merging default Azure cloud ephemeral disk configs."
+ )
+ else:
+ expected_log = (
+ "Ephemeral resource disk '/dev/disk/cloud/azure_resource' does "
+ "not exist. Not merging default Azure cloud ephemeral disk "
+ "configs."
+ )
+
+ with session_cloud.launch(
+ launch_kwargs={"instance_type": instance_type}
+ ) as client:
+ # Verify log file
+ log = client.read_from_file("/var/log/cloud-init.log")
+ assert expected_log in log
+
+ # Verify devices
+ dev_links = client.execute("ls /dev/disk/cloud")
+ assert "azure_root" in dev_links
+ assert "azure_root-part1" in dev_links
+ if is_ephemeral:
+ assert "azure_resource" in dev_links
+ assert "azure_resource-part1" in dev_links
+
+ # Verify mounts
+ blks = client.execute("lsblk -pPo NAME,TYPE,MOUNTPOINT")
+ root_device = client.execute(
+ "realpath /dev/disk/cloud/azure_root-part1"
+ )
+ assert (
+ 'NAME="{}" TYPE="part" MOUNTPOINT="/"'.format(root_device) in blks
+ )
+ if is_ephemeral:
+ ephemeral_device = client.execute(
+ "realpath /dev/disk/cloud/azure_resource-part1"
+ )
+ assert (
+ 'NAME="{}" TYPE="part" MOUNTPOINT="/mnt"'.format(
+ ephemeral_device
+ )
+ in blks
+ )
diff --git a/tests/integration_tests/bugs/test_lp1910835.py b/tests/integration_tests/bugs/test_lp1910835.py
new file mode 100644
index 00000000..1844594c
--- /dev/null
+++ b/tests/integration_tests/bugs/test_lp1910835.py
@@ -0,0 +1,64 @@
+"""Integration test for LP: #1910835.
+
+If users do not provide an SSH key and instead ask Azure to generate a key for
+them, the key material available in the IMDS may include CRLF sequences. Prior
+to e56b55452549cb037da0a4165154ffa494e9678a, the Azure datasource handled keys
+via a certificate, the tooling for which removed these sequences. This test
+ensures that cloud-init does not regress support for this Azure behaviour.
+
+This test provides the SSH key configured for tests to the instance in two
+ways: firstly, with CRLFs to mimic the generated keys, via the Azure API;
+secondly, as user-data in unmodified form. This means that even on systems
+which exhibit the bug fetching the platform's metadata, we can SSH into the SUT
+to confirm this (instead of having to assert SSH failure; there are lots of
+reasons SSH might fail).
+
+Once SSH'd in, we check that the two keys in .ssh/authorized_keys have the same
+material: if the Azure datasource has removed the CRLFs correctly, then they
+will match.
+"""
+import pytest
+
+USER_DATA_TMPL = """\
+#cloud-config
+ssh_authorized_keys:
+ - {}"""
+
+
+@pytest.mark.azure
+def test_crlf_in_azure_metadata_ssh_keys(session_cloud, setup_image):
+ authorized_keys_path = "/home/{}/.ssh/authorized_keys".format(
+ session_cloud.cloud_instance.username
+ )
+ # Pass in user-data to allow us to access the instance when the normal
+ # path fails
+ key_data = session_cloud.cloud_instance.key_pair.public_key_content
+ user_data = USER_DATA_TMPL.format(key_data)
+ # Throw a CRLF into the otherwise good key data, to emulate Azure's
+ # behaviour for generated keys
+ key_data = key_data[:20] + "\r\n" + key_data[20:]
+ vm_params = {
+ "os_profile": {
+ "linux_configuration": {
+ "ssh": {
+ "public_keys": [
+ {"path": authorized_keys_path, "key_data": key_data}
+ ]
+ }
+ }
+ }
+ }
+ with session_cloud.launch(
+ launch_kwargs={"vm_params": vm_params, "user_data": user_data}
+ ) as client:
+ authorized_keys = (
+ client.read_from_file(authorized_keys_path).strip().splitlines()
+ )
+ # We expect one key from the cloud, one from user-data
+ assert 2 == len(authorized_keys)
+ # And those two keys should be the same, except for a possible key
+ # comment, which Azure strips out
+ assert (
+ authorized_keys[0].split(" ")[:2]
+ == authorized_keys[1].split(" ")[:2]
+ )
diff --git a/tests/integration_tests/bugs/test_lp1912844.py b/tests/integration_tests/bugs/test_lp1912844.py
new file mode 100644
index 00000000..55511ed2
--- /dev/null
+++ b/tests/integration_tests/bugs/test_lp1912844.py
@@ -0,0 +1,105 @@
+"""Integration test for LP: #1912844
+
+cloud-init should ignore OVS-internal interfaces when performing its own
+interface determination: these interfaces are handled fully by OVS, so
+cloud-init should never need to touch them.
+
+This test is a semi-synthetic reproducer for the bug. It uses a similar
+network configuration, tweaked slightly so that DHCP will succeed even on
+"failed" boots. The exact bug doesn't reproduce with the NoCloud
+datasource, because it runs at init-local time (whereas the MAAS datasource,
+from the report, runs only at init (network) time): this means that the
+networking code runs before OVS creates its interfaces (which happens after
+init-local but, of course, before networking is up), and so doesn't generate
+the traceback that they cause. We work around this by calling
+``get_interfaces_by_mac`` directly in the test code.
+"""
+import pytest
+
+from tests.integration_tests import random_mac_address
+
+MAC_ADDRESS = random_mac_address()
+
+NETWORK_CONFIG = """\
+bonds:
+ bond0:
+ interfaces:
+ - enp5s0
+ macaddress: {0}
+ mtu: 1500
+bridges:
+ ovs-br:
+ interfaces:
+ - bond0
+ macaddress: {0}
+ mtu: 1500
+ openvswitch: {{}}
+ dhcp4: true
+ethernets:
+ enp5s0:
+ mtu: 1500
+ set-name: enp5s0
+ match:
+ macaddress: {0}
+version: 2
+vlans:
+ ovs-br.100:
+ id: 100
+ link: ovs-br
+ mtu: 1500
+ ovs-br.200:
+ id: 200
+ link: ovs-br
+ mtu: 1500
+""".format(
+ MAC_ADDRESS
+)
+
+
+SETUP_USER_DATA = """\
+#cloud-config
+packages:
+- openvswitch-switch
+"""
+
+
+@pytest.fixture
+def ovs_enabled_session_cloud(session_cloud):
+ """A session_cloud wrapper, to use an OVS-enabled image for tests.
+
+ This implementation is complicated by wanting to use ``session_cloud``'s
+ snapshot cleanup/retention logic, to avoid having to reimplement it here.
+ """
+ old_snapshot_id = session_cloud.snapshot_id
+ with session_cloud.launch(
+ user_data=SETUP_USER_DATA,
+ ) as instance:
+ instance.instance.clean()
+ session_cloud.snapshot_id = instance.snapshot()
+
+ yield session_cloud
+
+ try:
+ session_cloud.delete_snapshot()
+ finally:
+ session_cloud.snapshot_id = old_snapshot_id
+
+
+@pytest.mark.lxd_vm
+def test_get_interfaces_by_mac_doesnt_traceback(ovs_enabled_session_cloud):
+ """Launch our OVS-enabled image and confirm the bug doesn't reproduce."""
+ launch_kwargs = {
+ "config_dict": {
+ "user.network-config": NETWORK_CONFIG,
+ "volatile.eth0.hwaddr": MAC_ADDRESS,
+ },
+ }
+ with ovs_enabled_session_cloud.launch(
+ launch_kwargs=launch_kwargs,
+ ) as client:
+ result = client.execute(
+ "python3 -c"
+ "'from cloudinit.net import get_interfaces_by_mac;"
+ "get_interfaces_by_mac()'"
+ )
+ assert result.ok
diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py
index 88ac4408..83bc6af6 100644
--- a/tests/integration_tests/clouds.py
+++ b/tests/integration_tests/clouds.py
@@ -1,38 +1,107 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from abc import ABC, abstractmethod
+import datetime
import logging
-
-from pycloudlib import EC2, GCE, Azure, OCI, LXDContainer, LXDVirtualMachine
+import os.path
+import random
+import string
+from abc import ABC, abstractmethod
+from typing import Optional, Type
+from uuid import UUID
+
+from pycloudlib import (
+ EC2,
+ GCE,
+ OCI,
+ Azure,
+ LXDContainer,
+ LXDVirtualMachine,
+ Openstack,
+)
+from pycloudlib.cloud import BaseCloud
+from pycloudlib.lxd.cloud import _BaseLXD
from pycloudlib.lxd.instance import LXDInstance
import cloudinit
-from cloudinit.subp import subp
+from cloudinit.subp import ProcessExecutionError, subp
from tests.integration_tests import integration_settings
-from tests.integration_tests.instances import (
- IntegrationEc2Instance,
- IntegrationGceInstance,
- IntegrationAzureInstance, IntegrationInstance,
- IntegrationOciInstance,
- IntegrationLxdInstance,
-)
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import emit_dots_on_travis
-try:
- from typing import Optional
-except ImportError:
- pass
+log = logging.getLogger("integration_testing")
-log = logging.getLogger('integration_testing')
+def _get_ubuntu_series() -> list:
+ """Use distro-info-data's ubuntu.csv to get a list of Ubuntu series"""
+ out = ""
+ try:
+ out, _err = subp(["ubuntu-distro-info", "-a"])
+ except ProcessExecutionError:
+ log.info(
+ "ubuntu-distro-info (from the distro-info package) must be"
+ " installed to guess Ubuntu os/release"
+ )
+ return out.splitlines()
+
+
+class ImageSpecification:
+ """A specification of an image to launch for testing.
+
+ If either of ``os`` and ``release`` are not specified, an attempt will be
+ made to infer the correct values for these on instantiation.
+
+ :param image_id:
+ The image identifier used by the rest of the codebase to launch this
+ image.
+ :param os:
+ An optional string describing the operating system this image is for
+ (e.g. "ubuntu", "rhel", "freebsd").
+ :param release:
+ An optional string describing the operating system release (e.g.
+ "focal", "8"; the exact values here will depend on the OS).
+ """
+
+ def __init__(
+ self,
+ image_id: str,
+ os: Optional[str] = None,
+ release: Optional[str] = None,
+ ):
+ if image_id in _get_ubuntu_series():
+ if os is None:
+ os = "ubuntu"
+ if release is None:
+ release = image_id
+
+ self.image_id = image_id
+ self.os = os
+ self.release = release
+ log.info(
+ "Detected image: image_id=%s os=%s release=%s",
+ self.image_id,
+ self.os,
+ self.release,
+ )
+
+ @classmethod
+ def from_os_image(cls):
+ """Return an ImageSpecification for integration_settings.OS_IMAGE."""
+ parts = integration_settings.OS_IMAGE.split("::", 2)
+ return cls(*parts)
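As a rough illustration only (assuming ``ubuntu-distro-info`` is installed so bare series names are recognised; the AMI-style id below is made up, not part of this patch):

    from tests.integration_tests.clouds import ImageSpecification

    # A bare Ubuntu series name: os/release are inferred from the series list.
    spec = ImageSpecification("focal")
    assert (spec.os, spec.release) == ("ubuntu", "focal")

    # An explicit "<image_id>::<os>::<release>" OS_IMAGE value maps directly.
    spec = ImageSpecification("ami-0123456789abcdef0", "ubuntu", "focal")
    assert spec.image_id == "ami-0123456789abcdef0"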
class IntegrationCloud(ABC):
- datasource = None # type: Optional[str]
- integration_instance_cls = IntegrationInstance
+ datasource: str
+ cloud_instance: BaseCloud
def __init__(self, settings=integration_settings):
self.settings = settings
- self.cloud_instance = self._get_cloud_instance()
- self.image_id = self._get_initial_image()
+ self.cloud_instance: BaseCloud = self._get_cloud_instance()
+ self.initial_image_id = self._get_initial_image()
+ self.snapshot_id = None
+
+ @property
+ def image_id(self):
+ return self.snapshot_id or self.initial_image_id
def emit_settings_to_log(self) -> None:
log.info(
@@ -50,49 +119,62 @@ class IntegrationCloud(ABC):
raise NotImplementedError
def _get_initial_image(self):
- image_id = self.settings.OS_IMAGE
+ image = ImageSpecification.from_os_image()
try:
- image_id = self.cloud_instance.released_image(
- self.settings.OS_IMAGE)
+ return self.cloud_instance.daily_image(image.image_id)
except (ValueError, IndexError):
- pass
- return image_id
+ return image.image_id
- def _perform_launch(self, launch_kwargs):
+ def _perform_launch(self, launch_kwargs, **kwargs):
pycloudlib_instance = self.cloud_instance.launch(**launch_kwargs)
- pycloudlib_instance.wait(raise_on_cloudinit_failure=False)
return pycloudlib_instance
- def launch(self, user_data=None, launch_kwargs=None,
- settings=integration_settings):
+ def launch(
+ self,
+ user_data=None,
+ launch_kwargs=None,
+ settings=integration_settings,
+ **kwargs,
+ ) -> IntegrationInstance:
+ if launch_kwargs is None:
+ launch_kwargs = {}
if self.settings.EXISTING_INSTANCE_ID:
log.info(
- 'Not launching instance due to EXISTING_INSTANCE_ID. '
- 'Instance id: %s', self.settings.EXISTING_INSTANCE_ID)
+ "Not launching instance due to EXISTING_INSTANCE_ID. "
+ "Instance id: %s",
+ self.settings.EXISTING_INSTANCE_ID,
+ )
self.instance = self.cloud_instance.get_instance(
self.settings.EXISTING_INSTANCE_ID
)
- return
- kwargs = {
- 'image_id': self.image_id,
- 'user_data': user_data,
- 'wait': False,
+ return self.instance
+ default_launch_kwargs = {
+ "image_id": self.image_id,
+ "user_data": user_data,
}
- if launch_kwargs:
- kwargs.update(launch_kwargs)
+ launch_kwargs = {**default_launch_kwargs, **launch_kwargs}
log.info(
- "Launching instance with launch_kwargs:\n{}".format(
- "\n".join("{}={}".format(*item) for item in kwargs.items())
- )
+ "Launching instance with launch_kwargs:\n%s",
+ "\n".join("{}={}".format(*item) for item in launch_kwargs.items()),
)
- pycloudlib_instance = self._perform_launch(kwargs)
-
- log.info('Launched instance: %s', pycloudlib_instance)
- return self.get_instance(pycloudlib_instance, settings)
+ with emit_dots_on_travis():
+ pycloudlib_instance = self._perform_launch(launch_kwargs, **kwargs)
+ log.info("Launched instance: %s", pycloudlib_instance)
+ instance = self.get_instance(pycloudlib_instance, settings)
+ if launch_kwargs.get("wait", True):
+ # If we aren't waiting, we can't rely on command execution here
+ log.info(
+ "cloud-init version: %s",
+ instance.execute("cloud-init --version"),
+ )
+ serial = instance.execute("grep serial /etc/cloud/build.info")
+ if serial:
+ log.info("image serial: %s", serial.split()[1])
+ return instance
def get_instance(self, cloud_instance, settings=integration_settings):
- return self.integration_instance_cls(self, cloud_instance, settings)
+ return IntegrationInstance(self, cloud_instance, settings)
def destroy(self):
pass
@@ -100,52 +182,69 @@ class IntegrationCloud(ABC):
def snapshot(self, instance):
return self.cloud_instance.snapshot(instance, clean=True)
+ def delete_snapshot(self):
+ if self.snapshot_id:
+ if self.settings.KEEP_IMAGE:
+ log.info(
+ "NOT deleting snapshot image created for this testrun "
+ "because KEEP_IMAGE is True: %s",
+ self.snapshot_id,
+ )
+ else:
+ log.info(
+ "Deleting snapshot image created for this testrun: %s",
+ self.snapshot_id,
+ )
+ self.cloud_instance.delete_image(self.snapshot_id)
+
class Ec2Cloud(IntegrationCloud):
- datasource = 'ec2'
- integration_instance_cls = IntegrationEc2Instance
+ datasource = "ec2"
def _get_cloud_instance(self):
- return EC2(tag='ec2-integration-test')
+ return EC2(tag="ec2-integration-test")
class GceCloud(IntegrationCloud):
- datasource = 'gce'
- integration_instance_cls = IntegrationGceInstance
+ datasource = "gce"
def _get_cloud_instance(self):
return GCE(
- tag='gce-integration-test',
- project=self.settings.GCE_PROJECT,
- region=self.settings.GCE_REGION,
- zone=self.settings.GCE_ZONE,
+ tag="gce-integration-test",
)
class AzureCloud(IntegrationCloud):
- datasource = 'azure'
- integration_instance_cls = IntegrationAzureInstance
+ datasource = "azure"
+ cloud_instance: Azure
def _get_cloud_instance(self):
- return Azure(tag='azure-integration-test')
+ return Azure(tag="azure-integration-test")
def destroy(self):
- self.cloud_instance.delete_resource_group()
+ if self.settings.KEEP_INSTANCE:
+ log.info(
+ "NOT deleting resource group because KEEP_INSTANCE is true "
+ "and deleting resource group would also delete instance. "
+ "Instance and resource group must both be manually deleted."
+ )
+ else:
+ self.cloud_instance.delete_resource_group()
class OciCloud(IntegrationCloud):
- datasource = 'oci'
- integration_instance_cls = IntegrationOciInstance
+ datasource = "oci"
def _get_cloud_instance(self):
return OCI(
- tag='oci-integration-test',
- compartment_id=self.settings.OCI_COMPARTMENT_ID
+ tag="oci-integration-test",
)
class _LxdIntegrationCloud(IntegrationCloud):
- integration_instance_cls = IntegrationLxdInstance
+ pycloudlib_instance_cls: Type[_BaseLXD]
+ instance_tag: str
+ cloud_instance: _BaseLXD
def _get_cloud_instance(self):
return self.pycloudlib_instance_cls(tag=self.instance_tag)
@@ -156,60 +255,102 @@ class _LxdIntegrationCloud(IntegrationCloud):
@staticmethod
def _mount_source(instance: LXDInstance):
- target_path = '/usr/lib/python3/dist-packages/cloudinit'
- format_variables = {
- 'name': instance.name,
- 'source_path': cloudinit.__path__[0],
- 'container_path': target_path,
- }
- log.info(
- 'Mounting source {source_path} directly onto LXD container/vm '
- 'named {name} at {container_path}'.format(**format_variables))
- command = (
- 'lxc config device add {name} host-cloud-init disk '
- 'source={source_path} '
- 'path={container_path}'
- ).format(**format_variables)
- subp(command.split())
-
- def _perform_launch(self, launch_kwargs):
- launch_kwargs['inst_type'] = launch_kwargs.pop('instance_type', None)
- launch_kwargs.pop('wait')
- release = launch_kwargs.pop('image_id')
+ cloudinit_path = cloudinit.__path__[0]
+ mounts = [
+ (cloudinit_path, "/usr/lib/python3/dist-packages/cloudinit"),
+ (
+ os.path.join(cloudinit_path, "..", "templates"),
+ "/etc/cloud/templates",
+ ),
+ ]
+ for (n, (source_path, target_path)) in enumerate(mounts):
+ format_variables = {
+ "name": instance.name,
+ "source_path": os.path.realpath(source_path),
+ "container_path": target_path,
+ "idx": n,
+ }
+ log.info(
+ "Mounting source %(source_path)s directly onto LXD"
+ " container/VM named %(name)s at %(container_path)s",
+ format_variables,
+ )
+ command = (
+ "lxc config device add {name} host-cloud-init-{idx} disk "
+ "source={source_path} "
+ "path={container_path}"
+ ).format(**format_variables)
+ subp(command.split())
+
+ def _perform_launch(self, launch_kwargs, **kwargs):
+ launch_kwargs["inst_type"] = launch_kwargs.pop("instance_type", None)
+ wait = launch_kwargs.pop("wait", True)
+ release = launch_kwargs.pop("image_id")
try:
- profile_list = launch_kwargs['profile_list']
+ profile_list = launch_kwargs["profile_list"]
except KeyError:
profile_list = self._get_or_set_profile_list(release)
+ prefix = datetime.datetime.utcnow().strftime("cloudinit-%m%d-%H%M%S")
+ default_name = prefix + "".join(
+ random.choices(string.ascii_lowercase + string.digits, k=8)
+ )
pycloudlib_instance = self.cloud_instance.init(
- launch_kwargs.pop('name', None),
+ launch_kwargs.pop("name", default_name),
release,
profile_list=profile_list,
- **launch_kwargs
+ **launch_kwargs,
)
- if self.settings.CLOUD_INIT_SOURCE == 'IN_PLACE':
+ if self.settings.CLOUD_INIT_SOURCE == "IN_PLACE":
self._mount_source(pycloudlib_instance)
- pycloudlib_instance.start(wait=False)
- pycloudlib_instance.wait(raise_on_cloudinit_failure=False)
+ if "lxd_setup" in kwargs:
+ log.info("Running callback specified by 'lxd_setup' mark")
+ kwargs["lxd_setup"](pycloudlib_instance)
+ pycloudlib_instance.start(wait=wait)
return pycloudlib_instance
class LxdContainerCloud(_LxdIntegrationCloud):
- datasource = 'lxd_container'
+ datasource = "lxd_container"
+ cloud_instance: LXDContainer
pycloudlib_instance_cls = LXDContainer
- instance_tag = 'lxd-container-integration-test'
+ instance_tag = "lxd-container-integration-test"
class LxdVmCloud(_LxdIntegrationCloud):
- datasource = 'lxd_vm'
+ datasource = "lxd_vm"
+ cloud_instance: LXDVirtualMachine
pycloudlib_instance_cls = LXDVirtualMachine
- instance_tag = 'lxd-vm-integration-test'
+ instance_tag = "lxd-vm-integration-test"
_profile_list = None
def _get_or_set_profile_list(self, release):
if self._profile_list:
return self._profile_list
self._profile_list = self.cloud_instance.build_necessary_profiles(
- release)
+ release
+ )
return self._profile_list
+
+
+class OpenstackCloud(IntegrationCloud):
+ datasource = "openstack"
+
+ def _get_cloud_instance(self):
+ return Openstack(
+ tag="openstack-integration-test",
+ )
+
+ def _get_initial_image(self):
+ image = ImageSpecification.from_os_image()
+ try:
+ UUID(image.image_id)
+ except ValueError as e:
+ raise Exception(
+ "When using Openstack, `OS_IMAGE` MUST be specified with "
+ "a 36-character UUID image ID. Passing in a release name is "
+ "not valid here.\n"
+ "OS image id: {}".format(image.image_id)
+ ) from e
+ return image.image_id
diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py
index 73b44bfc..a90a5d49 100644
--- a/tests/integration_tests/conftest.py
+++ b/tests/integration_tests/conftest.py
@@ -1,33 +1,51 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import datetime
+import functools
import logging
import os
-import pytest
import sys
from contextlib import contextmanager
+from pathlib import Path
+from tarfile import TarFile
+from typing import Dict, Type
+
+import pytest
+from pycloudlib.lxd.instance import LXDInstance
from tests.integration_tests import integration_settings
from tests.integration_tests.clouds import (
+ AzureCloud,
Ec2Cloud,
GceCloud,
- AzureCloud,
- OciCloud,
+ ImageSpecification,
+ IntegrationCloud,
LxdContainerCloud,
LxdVmCloud,
+ OciCloud,
+ OpenstackCloud,
+ _LxdIntegrationCloud,
+)
+from tests.integration_tests.instances import (
+ CloudInitSource,
+ IntegrationInstance,
)
-
-log = logging.getLogger('integration_testing')
+log = logging.getLogger("integration_testing")
log.addHandler(logging.StreamHandler(sys.stdout))
log.setLevel(logging.INFO)
-platforms = {
- 'ec2': Ec2Cloud,
- 'gce': GceCloud,
- 'azure': AzureCloud,
- 'oci': OciCloud,
- 'lxd_container': LxdContainerCloud,
- 'lxd_vm': LxdVmCloud,
+platforms: Dict[str, Type[IntegrationCloud]] = {
+ "ec2": Ec2Cloud,
+ "gce": GceCloud,
+ "azure": AzureCloud,
+ "oci": OciCloud,
+ "lxd_container": LxdContainerCloud,
+ "lxd_vm": LxdVmCloud,
+ "openstack": OpenstackCloud,
}
+os_list = ["ubuntu"]
+
+session_start_time = datetime.datetime.now().strftime("%y%m%d%H%M%S")
def pytest_runtest_setup(item):
@@ -42,18 +60,30 @@ def pytest_runtest_setup(item):
test_marks = [mark.name for mark in item.iter_markers()]
supported_platforms = set(all_platforms).intersection(test_marks)
current_platform = integration_settings.PLATFORM
- unsupported_message = 'Cannot run on platform {}'.format(current_platform)
- if 'no_container' in test_marks:
- if 'lxd_container' in test_marks:
+ unsupported_message = "Cannot run on platform {}".format(current_platform)
+ if "no_container" in test_marks:
+ if "lxd_container" in test_marks:
raise Exception(
- 'lxd_container and no_container marks simultaneously set '
- 'on test'
+ "lxd_container and no_container marks simultaneously set "
+ "on test"
)
- if current_platform == 'lxd_container':
+ if current_platform == "lxd_container":
pytest.skip(unsupported_message)
if supported_platforms and current_platform not in supported_platforms:
pytest.skip(unsupported_message)
+ image = ImageSpecification.from_os_image()
+ current_os = image.os
+ supported_os_set = set(os_list).intersection(test_marks)
+ if current_os and supported_os_set and current_os not in supported_os_set:
+ pytest.skip("Cannot run on OS {}".format(current_os))
+ if "unstable" in test_marks and not integration_settings.RUN_UNSTABLE:
+ pytest.skip("Test marked unstable. Manually remove mark to run it")
+
+ current_release = image.release
+ if "not_{}".format(current_release) in test_marks:
+ pytest.skip("Cannot run on release {}".format(current_release))
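A minimal sketch of how a test opts out of a release via this hook; the mark name follows the "not_<release>" convention used elsewhere in this patch (e.g. ``not_bionic``), and the test body is only an example:

    import pytest

    @pytest.mark.not_bionic
    def test_needs_newer_than_bionic(client):
        # Skipped automatically by pytest_runtest_setup when the image
        # release is "bionic"; runs normally on any other release.
        assert client.execute("cloud-init status").ok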
+
# disable_subp_usage is defined at a higher level, but we don't
# want it applied here
@@ -62,7 +92,7 @@ def disable_subp_usage(request):
pass
-@pytest.yield_fixture(scope='session')
+@pytest.fixture(scope="session")
def session_cloud():
if integration_settings.PLATFORM not in platforms.keys():
raise ValueError(
@@ -74,83 +104,185 @@ def session_cloud():
cloud = platforms[integration_settings.PLATFORM]()
cloud.emit_settings_to_log()
+
yield cloud
+
cloud.destroy()
-@pytest.fixture(scope='session', autouse=True)
-def setup_image(session_cloud):
+def get_validated_source(
+ session_cloud: IntegrationCloud,
+ source=integration_settings.CLOUD_INIT_SOURCE,
+) -> CloudInitSource:
+ if source == "NONE":
+ return CloudInitSource.NONE
+ elif source == "IN_PLACE":
+ if session_cloud.datasource not in ["lxd_container", "lxd_vm"]:
+ raise ValueError(
+ "IN_PLACE as CLOUD_INIT_SOURCE only works for LXD"
+ )
+ return CloudInitSource.IN_PLACE
+ elif source == "PROPOSED":
+ return CloudInitSource.PROPOSED
+ elif source.startswith("ppa:"):
+ return CloudInitSource.PPA
+ elif os.path.isfile(str(source)):
+ return CloudInitSource.DEB_PACKAGE
+ elif source == "UPGRADE":
+ return CloudInitSource.UPGRADE
+ raise ValueError(
+ "Invalid value for CLOUD_INIT_SOURCE setting: {}".format(source)
+ )
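A sketch of the resulting mapping; ``session_cloud`` is only consulted for the IN_PLACE case, so ``None`` suffices for the other values, and the PPA name here is just an example:

    from tests.integration_tests.conftest import get_validated_source
    from tests.integration_tests.instances import CloudInitSource

    assert get_validated_source(None, "NONE") is CloudInitSource.NONE
    assert get_validated_source(None, "UPGRADE") is CloudInitSource.UPGRADE
    assert get_validated_source(None, "ppa:cloud-init-dev/daily") is CloudInitSource.PPA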
+
+
+@pytest.fixture(scope="session")
+def setup_image(session_cloud: IntegrationCloud, request):
"""Setup the target environment with the correct version of cloud-init.
So we can launch instances / run tests with the correct image
"""
- client = None
- log.info('Setting up environment for %s', session_cloud.datasource)
- if integration_settings.CLOUD_INIT_SOURCE == 'NONE':
- pass # that was easy
- elif integration_settings.CLOUD_INIT_SOURCE == 'IN_PLACE':
- if session_cloud.datasource not in ['lxd_container', 'lxd_vm']:
- raise ValueError(
- 'IN_PLACE as CLOUD_INIT_SOURCE only works for LXD')
- # The mount needs to happen after the instance is created, so
- # no further action needed here
- elif integration_settings.CLOUD_INIT_SOURCE == 'PROPOSED':
- client = session_cloud.launch()
- client.install_proposed_image()
- elif integration_settings.CLOUD_INIT_SOURCE.startswith('ppa:'):
- client = session_cloud.launch()
- client.install_ppa(integration_settings.CLOUD_INIT_SOURCE)
- elif os.path.isfile(str(integration_settings.CLOUD_INIT_SOURCE)):
- client = session_cloud.launch()
- client.install_deb()
- else:
- raise ValueError(
- 'Invalid value for CLOUD_INIT_SOURCE setting: {}'.format(
- integration_settings.CLOUD_INIT_SOURCE))
- if client:
- # Even if we're keeping instances, we don't want to keep this
- # one around as it was just for image creation
- client.destroy()
- log.info('Done with environment setup')
+
+ source = get_validated_source(session_cloud)
+ if not source.installs_new_version():
+ return
+ log.info("Setting up environment for %s", session_cloud.datasource)
+ client = session_cloud.launch()
+ client.install_new_cloud_init(source)
+ # Even if we're keeping instances, we don't want to keep this
+ # one around as it was just for image creation
+ client.destroy()
+ log.info("Done with environment setup")
+
+ # For some reason a yield here raises a
+ # ValueError: setup_image did not yield a value
+ # during setup, so use a finalizer instead.
+ request.addfinalizer(session_cloud.delete_snapshot)
+
+
+def _collect_logs(
+ instance: IntegrationInstance, node_id: str, test_failed: bool
+):
+ """Collect logs from remote instance.
+
+ Args:
+ instance: The current IntegrationInstance to collect logs from
+ node_id: The pytest representation of this test, e.g.:
+ tests/integration_tests/test_example.py::TestExample.test_example
+ test_failed: If test failed or not
+ """
+ if any(
+ [
+ integration_settings.COLLECT_LOGS == "NEVER",
+ integration_settings.COLLECT_LOGS == "ON_ERROR"
+ and not test_failed,
+ ]
+ ):
+ return
+ instance.execute(
+ "cloud-init collect-logs -u -t /var/tmp/cloud-init.tar.gz"
+ )
+ node_id_path = Path(
+ node_id.replace(
+ ".py", ""
+ ) # Having a directory with '.py' would be weird
+ .replace("::", os.path.sep) # Turn classes/tests into paths
+ .replace("[", "-") # For parametrized names
+ .replace("]", "") # For parameterized names
+ )
+ log_dir = (
+ Path(integration_settings.LOCAL_LOG_PATH)
+ / session_start_time
+ / node_id_path
+ )
+ log.info("Writing logs to %s", log_dir)
+
+ if not log_dir.exists():
+ log_dir.mkdir(parents=True)
+
+ # Add a symlink to the latest log output directory
+ last_symlink = Path(integration_settings.LOCAL_LOG_PATH) / "last"
+ if os.path.islink(last_symlink):
+ os.unlink(last_symlink)
+ os.symlink(log_dir.parent, last_symlink)
+
+ tarball_path = log_dir / "cloud-init.tar.gz"
+ try:
+ instance.pull_file("/var/tmp/cloud-init.tar.gz", tarball_path)
+ except Exception as e:
+ log.error("Failed to pull logs: %s", e)
+ return
+
+ tarball = TarFile.open(str(tarball_path))
+ tarball.extractall(path=str(log_dir))
+ tarball_path.unlink()
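By way of example, the path munging above turns a parametrized pytest node id into a per-run log directory roughly like this (the timestamp is illustrative):

    import os
    from pathlib import Path

    node_id = (
        "tests/integration_tests/test_example.py"
        "::TestExample.test_example[focal]"
    )
    node_id_path = Path(
        node_id.replace(".py", "")
        .replace("::", os.path.sep)
        .replace("[", "-")
        .replace("]", "")
    )
    # tests/integration_tests/test_example/TestExample.test_example-focal
    log_dir = Path("/tmp/cloud_init_test_logs") / "220101120000" / node_id_path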
@contextmanager
-def _client(request, fixture_utils, session_cloud):
+def _client(request, fixture_utils, session_cloud: IntegrationCloud):
"""Fixture implementation for the client fixtures.
Launch the dynamic IntegrationClient instance using any provided
userdata, yield to the test, then cleanup
"""
- user_data = fixture_utils.closest_marker_first_arg_or(
- request, 'user_data', None)
- name = fixture_utils.closest_marker_first_arg_or(
- request, 'instance_name', None
+ getter = functools.partial(
+ fixture_utils.closest_marker_first_arg_or, request, default=None
+ )
+ user_data = getter("user_data")
+ name = getter("instance_name")
+ lxd_config_dict = getter("lxd_config_dict")
+ lxd_setup = getter("lxd_setup")
+ lxd_use_exec = fixture_utils.closest_marker_args_or(
+ request, "lxd_use_exec", None
)
+
launch_kwargs = {}
if name is not None:
- launch_kwargs = {"name": name}
+ launch_kwargs["name"] = name
+ if lxd_config_dict is not None:
+ if not isinstance(session_cloud, _LxdIntegrationCloud):
+ pytest.skip("lxd_config_dict requires LXD")
+ launch_kwargs["config_dict"] = lxd_config_dict
+ if lxd_use_exec is not None:
+ if not isinstance(session_cloud, _LxdIntegrationCloud):
+ pytest.skip("lxd_use_exec requires LXD")
+ launch_kwargs["execute_via_ssh"] = False
+ local_launch_kwargs = {}
+ if lxd_setup is not None:
+ if not isinstance(session_cloud, _LxdIntegrationCloud):
+ pytest.skip("lxd_setup requires LXD")
+ local_launch_kwargs["lxd_setup"] = lxd_setup
+
with session_cloud.launch(
- user_data=user_data, launch_kwargs=launch_kwargs
+ user_data=user_data, launch_kwargs=launch_kwargs, **local_launch_kwargs
) as instance:
+ if lxd_use_exec is not None and isinstance(
+ instance.instance, LXDInstance
+ ):
+ # Existing instances are not affected by the launch kwargs, so
+ # also set it here; we still need the launch kwarg so waiting works
+ instance.instance.execute_via_ssh = False
+ previous_failures = request.session.testsfailed
yield instance
+ test_failed = request.session.testsfailed - previous_failures > 0
+ _collect_logs(instance, request.node.nodeid, test_failed)
-@pytest.yield_fixture
-def client(request, fixture_utils, session_cloud):
+@pytest.fixture
+def client(request, fixture_utils, session_cloud, setup_image):
"""Provide a client that runs for every test."""
with _client(request, fixture_utils, session_cloud) as client:
yield client
-@pytest.yield_fixture(scope='module')
-def module_client(request, fixture_utils, session_cloud):
+@pytest.fixture(scope="module")
+def module_client(request, fixture_utils, session_cloud, setup_image):
"""Provide a client that runs once per module."""
with _client(request, fixture_utils, session_cloud) as client:
yield client
-@pytest.yield_fixture(scope='class')
-def class_client(request, fixture_utils, session_cloud):
+@pytest.fixture(scope="class")
+def class_client(request, fixture_utils, session_cloud, setup_image):
"""Provide a client that runs once per class."""
with _client(request, fixture_utils, session_cloud) as client:
yield client
@@ -180,3 +312,20 @@ def pytest_assertrepr_compare(op, left, right):
'"{}" not in cloud-init.log string; unexpectedly found on'
" these lines:".format(left)
] + found_lines
+
+
+def pytest_configure(config):
+ """Perform initial configuration, before the test runs start.
+
+ This hook is only called if integration tests are being executed, so we can
+ use it to configure defaults for integration testing that differ from the
+ rest of the tests in the codebase.
+
+ See
+ https://docs.pytest.org/en/latest/reference.html#_pytest.hookspec.pytest_configure
+ for pytest's documentation.
+ """
+ if "log_cli_level" in config.option and not config.option.log_cli_level:
+ # If log_cli_level is available in this version of pytest and not set
+ # to anything, set it to INFO.
+ config.option.log_cli_level = "INFO"
diff --git a/tests/integration_tests/datasources/test_lxd_discovery.py b/tests/integration_tests/datasources/test_lxd_discovery.py
new file mode 100644
index 00000000..eb2a4cf2
--- /dev/null
+++ b/tests/integration_tests/datasources/test_lxd_discovery.py
@@ -0,0 +1,90 @@
+import json
+
+import pytest
+import yaml
+
+from tests.integration_tests.clouds import ImageSpecification
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import verify_clean_log
+
+
+def _customize_environment(client: IntegrationInstance):
+ client.write_to_file(
+ "/etc/cloud/cloud.cfg.d/99-detect-lxd.cfg",
+ "datasource_list: [LXD]\n",
+ )
+ client.execute("cloud-init clean --logs")
+ client.restart()
+
+
+# This test should be able to work on any cloud whose datasource specifies
+# a NETWORK dependency
+@pytest.mark.lxd_container
+@pytest.mark.lxd_vm
+@pytest.mark.ubuntu # Because netplan
+def test_lxd_datasource_discovery(client: IntegrationInstance):
+ """Test that DataSourceLXD is detected instead of NoCloud."""
+ _customize_environment(client)
+ nic_dev = "enp5s0" if client.settings.PLATFORM == "lxd_vm" else "eth0"
+ result = client.execute("cloud-init status --long")
+ if not result.ok:
+ raise AssertionError("cloud-init failed:\n%s", result.stderr)
+ if "DataSourceLXD" not in result.stdout:
+ raise AssertionError(
+ "cloud-init did not discover DataSourceLXD", result.stdout
+ )
+ netplan_yaml = client.execute("cat /etc/netplan/50-cloud-init.yaml")
+ netplan_cfg = yaml.safe_load(netplan_yaml)
+ assert {
+ "network": {"ethernets": {nic_dev: {"dhcp4": True}}, "version": 2}
+ } == netplan_cfg
+ log = client.read_from_file("/var/log/cloud-init.log")
+ verify_clean_log(log)
+ result = client.execute("cloud-id")
+ if result.stdout != "lxd":
+ raise AssertionError(
+ "cloud-id didn't report lxd. Result: %s", result.stdout
+ )
+ # Validate config instance data represented
+ data = json.loads(
+ client.read_from_file("/run/cloud-init/instance-data.json")
+ )
+ v1 = data["v1"]
+ ds_cfg = data["ds"]
+ assert "lxd" == v1["platform"]
+ assert "LXD socket API v. 1.0 (/dev/lxd/sock)" == v1["subplatform"]
+ ds_cfg = json.loads(client.execute("cloud-init query ds").stdout)
+ assert ["_doc", "_metadata_api_version", "config", "meta-data"] == sorted(
+ list(ds_cfg.keys())
+ )
+ if (
+ client.settings.PLATFORM == "lxd_vm"
+ and ImageSpecification.from_os_image().release == "bionic"
+ ):
+ # pycloudlib injects user.vendor_data for lxd_vm on bionic
+ # to start the lxd-agent.
+ # https://github.com/canonical/pycloudlib/blob/main/pycloudlib/\
+ # lxd/defaults.py#L13-L27
+ # Underscore-delimited aliases exist for any keys containing hyphens or
+ # dots.
+ lxd_config_keys = ["user.meta-data", "user.vendor-data"]
+ else:
+ lxd_config_keys = ["user.meta-data"]
+ assert "1.0" == ds_cfg["_metadata_api_version"]
+ assert lxd_config_keys == list(ds_cfg["config"].keys())
+ assert {"public-keys": v1["public_ssh_keys"][0]} == (
+ yaml.safe_load(ds_cfg["config"]["user.meta-data"])
+ )
+ assert "#cloud-config\ninstance-id" in ds_cfg["meta-data"]
+ # Assert NoCloud seed data is still present in cloud image metadata
+ # This will start failing if we redact metadata templates from
+ # https://cloud-images.ubuntu.com/daily/server/jammy/current/\
+ # jammy-server-cloudimg-amd64-lxd.tar.xz
+ nocloud_metadata = yaml.safe_load(
+ client.read_from_file("/var/lib/cloud/seed/nocloud-net/meta-data")
+ )
+ assert client.instance.name == nocloud_metadata["instance-id"]
+ assert (
+ nocloud_metadata["instance-id"] == nocloud_metadata["local-hostname"]
+ )
+ assert v1["public_ssh_keys"][0] == nocloud_metadata["public-keys"]
diff --git a/tests/integration_tests/datasources/test_network_dependency.py b/tests/integration_tests/datasources/test_network_dependency.py
new file mode 100644
index 00000000..32ac7053
--- /dev/null
+++ b/tests/integration_tests/datasources/test_network_dependency.py
@@ -0,0 +1,33 @@
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+
+
+def _customize_environment(client: IntegrationInstance):
+ # Insert our "disable_network_activation" file here
+ client.write_to_file(
+ "/etc/cloud/cloud.cfg.d/99-disable-network-activation.cfg",
+ "disable_network_activation: true\n",
+ )
+ client.execute("cloud-init clean --logs")
+ client.restart()
+
+
+# This test should be able to work on any cloud whose datasource specifies
+# a NETWORK dependency
+@pytest.mark.gce
+@pytest.mark.ubuntu # Because netplan
+def test_network_activation_disabled(client: IntegrationInstance):
+ """Test that the network is not activated during init mode."""
+ _customize_environment(client)
+ result = client.execute("systemctl status google-guest-agent.service")
+ if not result.ok:
+ raise AssertionError(
+ "google-guest-agent is not active:\n%s", result.stdout
+ )
+ log = client.read_from_file("/var/log/cloud-init.log")
+
+ assert "Running command ['netplan', 'apply']" not in log
+
+ assert "Not bringing up newly configured network interfaces" in log
+ assert "Bringing up newly configured network interfaces" not in log
diff --git a/tests/integration_tests/instances.py b/tests/integration_tests/instances.py
index 9b13288c..e26ee233 100644
--- a/tests/integration_tests/instances.py
+++ b/tests/integration_tests/instances.py
@@ -2,34 +2,61 @@
import logging
import os
import uuid
+from enum import Enum
from tempfile import NamedTemporaryFile
from pycloudlib.instance import BaseInstance
from pycloudlib.result import Result
from tests.integration_tests import integration_settings
+from tests.integration_tests.util import retry
try:
from typing import TYPE_CHECKING
+
if TYPE_CHECKING:
- from tests.integration_tests.clouds import IntegrationCloud
+ from tests.integration_tests.clouds import ( # noqa: F401
+ IntegrationCloud,
+ )
except ImportError:
pass
-log = logging.getLogger('integration_testing')
+log = logging.getLogger("integration_testing")
def _get_tmp_path():
tmp_filename = str(uuid.uuid4())
- return '/var/tmp/{}.tmp'.format(tmp_filename)
+ return "/var/tmp/{}.tmp".format(tmp_filename)
-class IntegrationInstance:
- use_sudo = True
+class CloudInitSource(Enum):
+ """Represents the cloud-init image source setting as a defined value.
+
+ Values here represent all possible values for CLOUD_INIT_SOURCE in
+ tests/integration_tests/integration_settings.py. See that file for an
+ explanation of these values. If the value set there can't be parsed into
+ one of these values, an exception will be raised.
+ """
+
+ NONE = 1
+ IN_PLACE = 2
+ PROPOSED = 3
+ PPA = 4
+ DEB_PACKAGE = 5
+ UPGRADE = 6
- def __init__(self, cloud: 'IntegrationCloud', instance: BaseInstance,
- settings=integration_settings):
+ def installs_new_version(self):
+ return self.name not in [self.NONE.name, self.IN_PLACE.name]
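In other words, only source types that actually install a different cloud-init trigger the install/snapshot pass in ``setup_image``; a quick sketch:

    from tests.integration_tests.instances import CloudInitSource

    assert not CloudInitSource.NONE.installs_new_version()
    assert not CloudInitSource.IN_PLACE.installs_new_version()
    assert CloudInitSource.PROPOSED.installs_new_version()
    assert CloudInitSource.DEB_PACKAGE.installs_new_version()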
+
+
+class IntegrationInstance:
+ def __init__(
+ self,
+ cloud: "IntegrationCloud",
+ instance: BaseInstance,
+ settings=integration_settings,
+ ):
self.cloud = cloud
self.instance = instance
self.settings = settings
@@ -37,44 +64,53 @@ class IntegrationInstance:
def destroy(self):
self.instance.delete()
- def execute(self, command, *, use_sudo=None) -> Result:
- if self.instance.username == 'root' and use_sudo is False:
- raise Exception('Root user cannot run unprivileged')
- if use_sudo is None:
- use_sudo = self.use_sudo
+ def restart(self):
+ """Restart this instance (via cloud mechanism) and wait for boot.
+
+ This wraps pycloudlib's `BaseInstance.restart`
+ """
+ log.info("Restarting instance and waiting for boot")
+ self.instance.restart()
+
+ def execute(self, command, *, use_sudo=True) -> Result:
+ if self.instance.username == "root" and use_sudo is False:
+ raise Exception("Root user cannot run unprivileged")
return self.instance.execute(command, use_sudo=use_sudo)
def pull_file(self, remote_path, local_path):
# First copy to a temporary directory because of permissions issues
tmp_path = _get_tmp_path()
- self.instance.execute('cp {} {}'.format(remote_path, tmp_path))
- self.instance.pull_file(tmp_path, local_path)
+ self.instance.execute("cp {} {}".format(str(remote_path), tmp_path))
+ self.instance.pull_file(tmp_path, str(local_path))
def push_file(self, local_path, remote_path):
# First push to a temporary directory because of permissions issues
tmp_path = _get_tmp_path()
- self.instance.push_file(local_path, tmp_path)
- self.execute('mv {} {}'.format(tmp_path, remote_path))
+ self.instance.push_file(str(local_path), tmp_path)
+ assert self.execute("mv {} {}".format(tmp_path, str(remote_path))).ok
def read_from_file(self, remote_path) -> str:
- result = self.execute('cat {}'.format(remote_path))
+ result = self.execute("cat {}".format(remote_path))
if result.failed:
# TODO: Raise here whatever pycloudlib raises when it has
# a consistent error response
raise IOError(
- 'Failed reading remote file via cat: {}\n'
- 'Return code: {}\n'
- 'Stderr: {}\n'
- 'Stdout: {}'.format(
- remote_path, result.return_code,
- result.stderr, result.stdout)
+ "Failed reading remote file via cat: {}\n"
+ "Return code: {}\n"
+ "Stderr: {}\n"
+ "Stdout: {}".format(
+ remote_path,
+ result.return_code,
+ result.stderr,
+ result.stdout,
+ )
)
return result.stdout
def write_to_file(self, remote_path, contents: str):
# Writes file locally and then pushes it rather
# than writing the file directly on the instance
- with NamedTemporaryFile('w', delete=False) as tmp_file:
+ with NamedTemporaryFile("w", delete=False) as tmp_file:
tmp_file.write(contents)
try:
@@ -83,48 +119,79 @@ class IntegrationInstance:
os.unlink(tmp_file.name)
def snapshot(self):
- return self.cloud.snapshot(self.instance)
-
- def _install_new_cloud_init(self, remote_script):
- self.execute(remote_script)
- version = self.execute('cloud-init -v').split()[-1]
- log.info('Installed cloud-init version: %s', version)
- self.instance.clean()
- image_id = self.snapshot()
- log.info('Created new image: %s', image_id)
- self.cloud.image_id = image_id
-
+ image_id = self.cloud.snapshot(self.instance)
+ log.info("Created new image: %s", image_id)
+ return image_id
+
+ def install_new_cloud_init(
+ self,
+ source: CloudInitSource,
+ take_snapshot=True,
+ clean=True,
+ ):
+ if source == CloudInitSource.DEB_PACKAGE:
+ self.install_deb()
+ elif source == CloudInitSource.PPA:
+ self.install_ppa()
+ elif source == CloudInitSource.PROPOSED:
+ self.install_proposed_image()
+ elif source == CloudInitSource.UPGRADE:
+ self.upgrade_cloud_init()
+ else:
+ raise Exception(
+ "Specified to install {} which isn't supported here".format(
+ source
+ )
+ )
+ version = self.execute("cloud-init -v").split()[-1]
+ log.info("Installed cloud-init version: %s", version)
+ if clean:
+ self.instance.clean()
+ if take_snapshot:
+ snapshot_id = self.snapshot()
+ self.cloud.snapshot_id = snapshot_id
+
+ # assert with retry because we can compete with apt already running in the
+ # background and get: E: Could not get lock /var/lib/apt/lists/lock - open
+ # (11: Resource temporarily unavailable)
+
+ @retry(tries=30, delay=1)
def install_proposed_image(self):
- log.info('Installing proposed image')
- remote_script = (
- '{sudo} echo deb "http://archive.ubuntu.com/ubuntu '
- '$(lsb_release -sc)-proposed main" | '
- '{sudo} tee /etc/apt/sources.list.d/proposed.list\n'
- '{sudo} apt-get update -q\n'
- '{sudo} apt-get install -qy cloud-init'
- ).format(sudo='sudo' if self.use_sudo else '')
- self._install_new_cloud_init(remote_script)
-
- def install_ppa(self, repo):
- log.info('Installing PPA')
- remote_script = (
- '{sudo} add-apt-repository {repo} -y && '
- '{sudo} apt-get update -q && '
- '{sudo} apt-get install -qy cloud-init'
- ).format(sudo='sudo' if self.use_sudo else '', repo=repo)
- self._install_new_cloud_init(remote_script)
-
+ log.info("Installing proposed image")
+ assert self.execute(
+ 'echo deb "http://archive.ubuntu.com/ubuntu '
+ '$(lsb_release -sc)-proposed main" >> '
+ "/etc/apt/sources.list.d/proposed.list"
+ ).ok
+ assert self.execute("apt-get update -q").ok
+ assert self.execute("apt-get install -qy cloud-init").ok
+
+ @retry(tries=30, delay=1)
+ def install_ppa(self):
+ log.info("Installing PPA")
+ assert self.execute(
+ "add-apt-repository {} -y".format(self.settings.CLOUD_INIT_SOURCE)
+ ).ok
+ assert self.execute("apt-get update -q").ok
+ assert self.execute("apt-get install -qy cloud-init").ok
+
+ @retry(tries=30, delay=1)
def install_deb(self):
- log.info('Installing deb package')
+ log.info("Installing deb package")
deb_path = integration_settings.CLOUD_INIT_SOURCE
deb_name = os.path.basename(deb_path)
- remote_path = '/var/tmp/{}'.format(deb_name)
+ remote_path = "/var/tmp/{}".format(deb_name)
self.push_file(
local_path=integration_settings.CLOUD_INIT_SOURCE,
- remote_path=remote_path)
- remote_script = '{sudo} dpkg -i {path}'.format(
- sudo='sudo' if self.use_sudo else '', path=remote_path)
- self._install_new_cloud_init(remote_script)
+ remote_path=remote_path,
+ )
+ assert self.execute("dpkg -i {path}".format(path=remote_path)).ok
+
+ @retry(tries=30, delay=1)
+ def upgrade_cloud_init(self):
+ log.info("Upgrading cloud-init to latest version in archive")
+ assert self.execute("apt-get update -q").ok
+ assert self.execute("apt-get install -qy cloud-init").ok
def __enter__(self):
return self
@@ -132,23 +199,3 @@ class IntegrationInstance:
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.settings.KEEP_INSTANCE:
self.destroy()
-
-
-class IntegrationEc2Instance(IntegrationInstance):
- pass
-
-
-class IntegrationGceInstance(IntegrationInstance):
- pass
-
-
-class IntegrationAzureInstance(IntegrationInstance):
- pass
-
-
-class IntegrationOciInstance(IntegrationInstance):
- pass
-
-
-class IntegrationLxdInstance(IntegrationInstance):
- use_sudo = False
diff --git a/tests/integration_tests/integration_settings.py b/tests/integration_tests/integration_settings.py
index a0609f7e..f27e4f12 100644
--- a/tests/integration_tests/integration_settings.py
+++ b/tests/integration_tests/integration_settings.py
@@ -1,29 +1,40 @@
# This file is part of cloud-init. See LICENSE file for license information.
import os
+from cloudinit.util import is_false, is_true
+
##################################################################
# LAUNCH SETTINGS
##################################################################
# Keep instance (mostly for debugging) when test is finished
KEEP_INSTANCE = False
+# Keep snapshot image (mostly for debugging) when test is finished
+KEEP_IMAGE = False
+# Run tests marked as unstable. Expect failures and dragons.
+RUN_UNSTABLE = False
# One of:
# lxd_container
+# lxd_vm
# azure
# ec2
# gce
# oci
-PLATFORM = 'lxd_container'
+# openstack
+PLATFORM = "lxd_container"
# The cloud-specific instance type to run. E.g., a1.medium on AWS
# If the pycloudlib instance provides a default, this can be left None
INSTANCE_TYPE = None
# Determines the base image to use or generate new images from.
-# Can be the name of the OS if running a stock image,
-# otherwise the id of the image being used if using a custom image
-OS_IMAGE = 'focal'
+#
+# This can be the name of an Ubuntu release, or in the format
+# <image_id>[::<os>[::<release>]]. If given, os and release should describe
+# the image specified by image_id. (Ubuntu releases are converted to this
+# format internally; in this case, to "focal::ubuntu::focal".)
+OS_IMAGE = "focal"
# Populate if you want to use a pre-launched instance instead of
# creating a new one. The exact contents will be platform dependent
@@ -49,35 +60,30 @@ EXISTING_INSTANCE_ID = None
# code.
# PROPOSED
# Install from the Ubuntu proposed repo
+# UPGRADE
+# Upgrade cloud-init to the version in the Ubuntu archive
# <ppa repo>, e.g., ppa:cloud-init-dev/proposed
# Install from a PPA. It MUST start with 'ppa:'
# <file path>
# A path to a valid package to be uploaded and installed
-CLOUD_INIT_SOURCE = 'NONE'
-
-##################################################################
-# GCE SPECIFIC SETTINGS
-##################################################################
-# Required for GCE
-GCE_PROJECT = None
+CLOUD_INIT_SOURCE = "NONE"
-# You probably want to override these
-GCE_REGION = 'us-central1'
-GCE_ZONE = 'a'
-
-##################################################################
-# OCI SPECIFIC SETTINGS
-##################################################################
-# Compartment-id found at
-# https://console.us-phoenix-1.oraclecloud.com/a/identity/compartments
-# Required for Oracle
-OCI_COMPARTMENT_ID = None
+# Before an instance is torn down, we run `cloud-init collect-logs`
+# and transfer them locally. These settings specify when to collect these
+# logs and where to put them on the local filesystem
+# One of:
+# 'ALWAYS'
+# 'ON_ERROR'
+# 'NEVER'
+COLLECT_LOGS = "ON_ERROR"
+LOCAL_LOG_PATH = "/tmp/cloud_init_test_logs"
##################################################################
# USER SETTINGS OVERRIDES
##################################################################
# Bring in any user-file defined settings
try:
+ # pylint: disable=wildcard-import,unused-wildcard-import
from tests.integration_tests.user_settings import * # noqa
except ImportError:
pass
@@ -91,6 +97,13 @@ except ImportError:
# Perhaps a bit too hacky, but it works :)
current_settings = [var for var in locals() if var.isupper()]
for setting in current_settings:
- globals()[setting] = os.getenv(
- 'CLOUD_INIT_{}'.format(setting), globals()[setting]
+ env_setting = os.getenv(
+ "CLOUD_INIT_{}".format(setting), globals()[setting]
)
+ if isinstance(env_setting, str):
+ env_setting = env_setting.strip()
+ if is_true(env_setting):
+ env_setting = True
+ elif is_false(env_setting):
+ env_setting = False
+ globals()[setting] = env_setting
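A small sketch of how the coercion above treats environment overrides; the variable names in the comments are examples only:

    from cloudinit.util import is_false, is_true

    def _coerce(env_setting: str):
        env_setting = env_setting.strip()
        if is_true(env_setting):
            return True
        if is_false(env_setting):
            return False
        return env_setting

    assert _coerce(" true ") is True      # e.g. CLOUD_INIT_KEEP_INSTANCE=true
    assert _coerce("0") is False          # e.g. CLOUD_INIT_RUN_UNSTABLE=0
    assert _coerce("lxd_vm") == "lxd_vm"  # e.g. CLOUD_INIT_PLATFORM=lxd_vm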
diff --git a/tests/integration_tests/modules/test_apt.py b/tests/integration_tests/modules/test_apt.py
new file mode 100644
index 00000000..adab46a8
--- /dev/null
+++ b/tests/integration_tests/modules/test_apt.py
@@ -0,0 +1,354 @@
+"""Series of integration tests covering apt functionality."""
+import re
+
+import pytest
+
+from cloudinit import gpg
+from cloudinit.config import cc_apt_configure
+from tests.integration_tests.clouds import ImageSpecification
+from tests.integration_tests.instances import IntegrationInstance
+
+USER_DATA = """\
+#cloud-config
+apt:
+ conf: |
+ APT {
+ Get {
+ Assume-Yes "true";
+ Fix-Broken "true";
+ }
+ }
+ primary:
+ - arches: [default]
+ uri: http://badarchive.ubuntu.com/ubuntu
+ security:
+ - arches: [default]
+ uri: http://badsecurity.ubuntu.com/ubuntu
+ sources_list: |
+ deb $MIRROR $RELEASE main restricted
+ deb-src $MIRROR $RELEASE main restricted
+ deb $PRIMARY $RELEASE universe restricted
+ deb-src $PRIMARY $RELEASE universe restricted
+ deb $SECURITY $RELEASE-security multiverse
+ deb-src $SECURITY $RELEASE-security multiverse
+ sources:
+ test_keyserver:
+ keyid: 110E21D8B0E2A1F0243AF6820856F197B892ACEA
+ keyserver: keyserver.ubuntu.com
+ source: "deb http://ppa.launchpad.net/canonical-kernel-team/ppa/ubuntu $RELEASE main"
+ test_ppa:
+ keyid: 441614D8
+ keyserver: keyserver.ubuntu.com
+ source: "ppa:simplestreams-dev/trunk"
+ test_signed_by:
+ keyid: A2EB2DEC0BD7519B7B38BE38376A290EC8068B11
+ keyserver: keyserver.ubuntu.com
+ source: "deb [signed-by=$KEY_FILE] http://ppa.launchpad.net/juju/stable/ubuntu $RELEASE main"
+ test_bad_key:
+ key: ""
+ source: "deb $MIRROR $RELEASE main"
+ test_key:
+ source: "deb http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu $RELEASE main"
+ key: |
+ -----BEGIN PGP PUBLIC KEY BLOCK-----
+ Version: SKS 1.1.6
+ Comment: Hostname: keyserver.ubuntu.com
+
+ mQINBFbZRUIBEAC+A0PIKYBP9kLC4hQtRrffRS11uLo8/BdtmOdrlW0hpPHzCfKnjR3tvSEI
+ lqPHG1QrrjAXKZDnZMRz+h/px7lUztvytGzHPSJd5ARUzAyjyRezUhoJ3VSCxrPqx62avuWf
+ RfoJaIeHfDehL5/dTVkyiWxfVZ369ZX6JN2AgLsQTeybTQ75+2z0xPrrhnGmgh6g0qTYcAaq
+ M5ONOGiqeSBX/Smjh6ALy5XkhUiFGLsI7Yluf6XSICY/x7gd6RAfgSIQrUTNMoS1sqhT4aot
+ +xvOfQy8ySkfAK4NddXql6E/+ZqTmBY/Lr0YklFBy8jGT+UysfiIznPMIwbmgq5Li7BtDDtX
+ b8Uyi4edPpjtextezfXYn4NVIpPL5dPZS/FXh4HpzyH0pYCfrH4QDGA7i52AGmhpiOFjJMo6
+ N33sdjZHOH/2Vyp+QZaQnsdUAi1N4M6c33tQbpIScn1SY+El8z5JDA4PBzkw8HpLCi1gGoa6
+ V4kfbWqXXbGAJFkLkP/vc4+pY9axOlmCkJg7xCPwhI75y1cONgovhz+BEXOzolh5KZuGbGbj
+ xe0wva5DLBeIg7EQFf+99pOS7Syby3Xpm6ZbswEFV0cllK4jf/QMjtfInxobuMoI0GV0bE5l
+ WlRtPCK5FnbHwxi0wPNzB/5fwzJ77r6HgPrR0OkT0lWmbUyoOQARAQABtC1MYXVuY2hwYWQg
+ UFBBIGZvciBjbG91ZCBpbml0IGRldmVsb3BtZW50IHRlYW2JAjgEEwECACIFAlbZRUICGwMG
+ CwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEAg9Bvvk0wTfHfcP/REK5N2s1JYc69qEa9ZN
+ o6oi+A7l6AYw+ZY88O5TJe7F9otv5VXCIKSUT0Vsepjgf0mtXAgf/sb2lsJn/jp7tzgov3YH
+ vSrkTkRydz8xcA87gwQKePuvTLxQpftF4flrBxgSueIn5O/tPrBOxLz7EVYBc78SKg9aj9L2
+ yUp+YuNevlwfZCTYeBb9r3FHaab2HcgkwqYch66+nKYfwiLuQ9NzXXm0Wn0JcEQ6pWvJscbj
+ C9BdawWovfvMK5/YLfI6Btm7F4mIpQBdhSOUp/YXKmdvHpmwxMCN2QhqYK49SM7qE9aUDbJL
+ arppSEBtlCLWhRBZYLTUna+BkuQ1bHz4St++XTR49Qd7vDERALpApDjB2dxPfMiBzCMwQQyq
+ uy13exU8o2ETLg+dZSLfDTzrBNsBFmXlw8WW17nTISYdKeGKL+QdlUjpzdwUMMzHhAO8SmMH
+ zjeSlDSRMXBJFAFSbCl7EwmMKa3yVX0zInT91fNllZ3iatAmtVdqVH/BFQfTIMH2ET7A8WzJ
+ ZzVSuMRhqoKdr5AMcHuJGPUoVkVJHQA+NNvEiXSysF3faL7jmKapmUwrhpYYX2H8pf+VMu2e
+ cLflKTI28dl+ZQ4Pl/aVsxrti/pzhdYy05Sn5ddtySyIkvo8L1cU5MWpbvSlFPkTstBUDLBf
+ pb0uBy+g0oxJQg15
+ =uy53
+ -----END PGP PUBLIC KEY BLOCK-----
+apt_pipelining: os
+""" # noqa: E501
+
+EXPECTED_REGEXES = [
+ r"deb http://badarchive.ubuntu.com/ubuntu [a-z]+ main restricted",
+ r"deb-src http://badarchive.ubuntu.com/ubuntu [a-z]+ main restricted",
+ r"deb http://badarchive.ubuntu.com/ubuntu [a-z]+ universe restricted",
+ r"deb-src http://badarchive.ubuntu.com/ubuntu [a-z]+ universe restricted",
+ r"deb http://badsecurity.ubuntu.com/ubuntu [a-z]+-security multiverse",
+ r"deb-src http://badsecurity.ubuntu.com/ubuntu [a-z]+-security multiverse",
+]
+
+TEST_KEYSERVER_KEY = "110E 21D8 B0E2 A1F0 243A F682 0856 F197 B892 ACEA"
+TEST_PPA_KEY = "3552 C902 B4DD F7BD 3842 1821 015D 28D7 4416 14D8"
+TEST_KEY = "1FF0 D853 5EF7 E719 E5C8 1B9C 083D 06FB E4D3 04DF"
+TEST_SIGNED_BY_KEY = "A2EB 2DEC 0BD7 519B 7B38 BE38 376A 290E C806 8B11"
+
+
+@pytest.mark.ubuntu
+@pytest.mark.user_data(USER_DATA)
+class TestApt:
+ def get_keys(self, class_client: IntegrationInstance):
+ """Return all keys in /etc/apt/trusted.gpg.d/ and /etc/apt/trusted.gpg
+ in human readable format. Mimics the output of apt-key finger
+ """
+ list_cmd = " ".join(gpg.GPG_LIST) + " "
+ keys = class_client.execute(list_cmd + cc_apt_configure.APT_LOCAL_KEYS)
+ print(keys)
+ files = class_client.execute(
+ "ls " + cc_apt_configure.APT_TRUSTED_GPG_DIR
+ )
+ for file in files.split():
+ path = cc_apt_configure.APT_TRUSTED_GPG_DIR + file
+ keys += class_client.execute(list_cmd + path) or ""
+ return keys
+
+ def test_sources_list(self, class_client: IntegrationInstance):
+ """Integration test for the apt module's `sources_list` functionality.
+
+ This test specifies a ``sources_list`` and then checks that (a) the
+ expected number of sources.list entries is present, and (b) that each
+ expected line appears in the file.
+
+ (This is ported from
+ `tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml`.)
+ """
+ sources_list = class_client.read_from_file("/etc/apt/sources.list")
+ assert 6 == len(sources_list.rstrip().split("\n"))
+
+ for expected_re in EXPECTED_REGEXES:
+ assert re.search(expected_re, sources_list) is not None
+
+ def test_apt_conf(self, class_client: IntegrationInstance):
+ """Test the apt conf functionality.
+
+ Ported from tests/cloud_tests/testcases/modules/apt_configure_conf.py
+ """
+ apt_config = class_client.read_from_file(
+ "/etc/apt/apt.conf.d/94cloud-init-config"
+ )
+ assert 'Assume-Yes "true";' in apt_config
+ assert 'Fix-Broken "true";' in apt_config
+
+ def test_ppa_source(self, class_client: IntegrationInstance):
+ """Test the apt ppa functionality.
+
+ Ported from
+ tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.py
+ """
+ release = ImageSpecification.from_os_image().release
+ ppa_path_contents = class_client.read_from_file(
+ "/etc/apt/sources.list.d/"
+ "simplestreams-dev-ubuntu-trunk-{}.list".format(release)
+ )
+
+ assert (
+ "http://ppa.launchpad.net/simplestreams-dev/trunk/ubuntu"
+ in ppa_path_contents
+ )
+
+ assert TEST_PPA_KEY in self.get_keys(class_client)
+
+ def test_signed_by(self, class_client: IntegrationInstance):
+ """Test the apt signed-by functionality."""
+ release = ImageSpecification.from_os_image().release
+ source = (
+ "deb [signed-by=/etc/apt/cloud-init.gpg.d/test_signed_by.gpg] "
+ "http://ppa.launchpad.net/juju/stable/ubuntu"
+ " {} main".format(release)
+ )
+ path_contents = class_client.read_from_file(
+ "/etc/apt/sources.list.d/test_signed_by.list"
+ )
+ assert path_contents == source
+
+ key = class_client.execute(
+ "gpg --no-default-keyring --with-fingerprint --list-keys "
+ "--keyring /etc/apt/cloud-init.gpg.d/test_signed_by.gpg"
+ )
+
+ assert TEST_SIGNED_BY_KEY in key
+
+ def test_bad_key(self, class_client: IntegrationInstance):
+ """Test the apt signed-by functionality."""
+ with pytest.raises(OSError):
+ class_client.read_from_file(
+ "/etc/apt/trusted.list.d/test_bad_key.gpg"
+ )
+
+ def test_key(self, class_client: IntegrationInstance):
+ """Test the apt key functionality.
+
+ Ported from
+ tests/cloud_tests/testcases/modules/apt_configure_sources_key.py
+ """
+ test_archive_contents = class_client.read_from_file(
+ "/etc/apt/sources.list.d/test_key.list"
+ )
+
+ assert (
+ "http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu"
+ in test_archive_contents
+ )
+ assert TEST_KEY in self.get_keys(class_client)
+
+ def test_keyserver(self, class_client: IntegrationInstance):
+ """Test the apt keyserver functionality.
+
+ Ported from
+ tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.py
+ """
+ test_keyserver_contents = class_client.read_from_file(
+ "/etc/apt/sources.list.d/test_keyserver.list"
+ )
+
+ assert (
+ "http://ppa.launchpad.net/canonical-kernel-team/ppa/ubuntu"
+ in test_keyserver_contents
+ )
+
+ assert TEST_KEYSERVER_KEY in self.get_keys(class_client)
+
+ def test_os_pipelining(self, class_client: IntegrationInstance):
+ """Test 'os' settings does not write apt config file.
+
+ Ported from tests/cloud_tests/testcases/modules/apt_pipelining_os.py
+ """
+ conf_exists = class_client.execute(
+ "test -f /etc/apt/apt.conf.d/90cloud-init-pipelining"
+ ).ok
+ assert conf_exists is False
+
+
+_DEFAULT_DATA = """\
+#cloud-config
+apt:
+ primary:
+ - arches:
+ - default
+ {uri}
+ security:
+ - arches:
+ - default
+"""
+DEFAULT_DATA = _DEFAULT_DATA.format(uri="")
+
+
+@pytest.mark.ubuntu
+@pytest.mark.user_data(DEFAULT_DATA)
+class TestDefaults:
+ @pytest.mark.openstack
+ def test_primary_on_openstack(self, class_client: IntegrationInstance):
+ """Test apt default primary source on openstack.
+
+ When no uri is provided.
+ """
+ zone = class_client.execute("cloud-init query v1.availability_zone")
+ sources_list = class_client.read_from_file("/etc/apt/sources.list")
+ assert "{}.clouds.archive.ubuntu.com".format(zone) in sources_list
+
+ def test_security(self, class_client: IntegrationInstance):
+ """Test apt default security sources.
+
+ Ported from
+ tests/cloud_tests/testcases/modules/apt_configure_security.py
+ """
+ sources_list = class_client.read_from_file("/etc/apt/sources.list")
+
+ # 3 lines from main, universe, and multiverse
+ sec_url = "deb http://security.ubuntu.com/ubuntu"
+ if class_client.settings.PLATFORM == "azure":
+ sec_url = (
+ "deb http://azure.archive.ubuntu.com/ubuntu/ jammy-security"
+ )
+ sec_src_url = sec_url.replace("deb ", "# deb-src ")
+ assert 3 == sources_list.count(sec_url)
+ assert 3 == sources_list.count(sec_src_url)
+
+
+DEFAULT_DATA_WITH_URI = _DEFAULT_DATA.format(
+ uri='uri: "http://something.random.invalid/ubuntu"'
+)
+
+
+@pytest.mark.user_data(DEFAULT_DATA_WITH_URI)
+def test_default_primary_with_uri(client: IntegrationInstance):
+ """Test apt default primary sources.
+
+ Ported from
+ tests/cloud_tests/testcases/modules/apt_configure_primary.py
+ """
+ sources_list = client.read_from_file("/etc/apt/sources.list")
+ assert "archive.ubuntu.com" not in sources_list
+
+ assert "something.random.invalid" in sources_list
+
+
+DISABLED_DATA = """\
+#cloud-config
+apt:
+ disable_suites:
+ - $RELEASE
+ - $RELEASE-updates
+ - $RELEASE-backports
+ - $RELEASE-security
+apt_pipelining: false
+"""
+
+
+@pytest.mark.ubuntu
+@pytest.mark.user_data(DISABLED_DATA)
+class TestDisabled:
+ def test_disable_suites(self, class_client: IntegrationInstance):
+ """Test disabling of apt suites.
+
+ Ported from
+ tests/cloud_tests/testcases/modules/apt_configure_disable_suites.py
+ """
+ sources_list = class_client.execute(
+ "cat /etc/apt/sources.list | grep -v '^#'"
+ ).strip()
+ assert "" == sources_list
+
+ def test_disable_apt_pipelining(self, class_client: IntegrationInstance):
+ """Test disabling of apt pipelining.
+
+ Ported from
+ tests/cloud_tests/testcases/modules/apt_pipelining_disable.py
+ """
+ conf = class_client.read_from_file(
+ "/etc/apt/apt.conf.d/90cloud-init-pipelining"
+ )
+ assert 'Acquire::http::Pipeline-Depth "0";' in conf
+
+
+APT_PROXY_DATA = """\
+#cloud-config
+apt:
+ proxy: "http://proxy.internal:3128"
+ http_proxy: "http://squid.internal:3128"
+ ftp_proxy: "ftp://squid.internal:3128"
+ https_proxy: "https://squid.internal:3128"
+"""
+
+
+@pytest.mark.ubuntu
+@pytest.mark.user_data(APT_PROXY_DATA)
+def test_apt_proxy(client: IntegrationInstance):
+ """Test the apt proxy data gets written correctly."""
+ out = client.read_from_file("/etc/apt/apt.conf.d/90cloud-init-aptproxy")
+ assert 'Acquire::http::Proxy "http://proxy.internal:3128";' in out
+ assert 'Acquire::http::Proxy "http://squid.internal:3128";' in out
+ assert 'Acquire::ftp::Proxy "ftp://squid.internal:3128";' in out
+ assert 'Acquire::https::Proxy "https://squid.internal:3128";' in out
diff --git a/tests/integration_tests/modules/test_apt_configure_sources_list.py b/tests/integration_tests/modules/test_apt_configure_sources_list.py
deleted file mode 100644
index d2bcc61a..00000000
--- a/tests/integration_tests/modules/test_apt_configure_sources_list.py
+++ /dev/null
@@ -1,51 +0,0 @@
-"""Integration test for the apt module's ``sources_list`` functionality.
-
-This test specifies a ``sources_list`` and then checks that (a) the expected
-number of sources.list entries is present, and (b) that each expected line
-appears in the file.
-
-(This is ported from
-``tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml``.)"""
-import re
-
-import pytest
-
-
-USER_DATA = """\
-#cloud-config
-apt:
- primary:
- - arches: [default]
- uri: http://archive.ubuntu.com/ubuntu
- security:
- - arches: [default]
- uri: http://security.ubuntu.com/ubuntu
- sources_list: |
- deb $MIRROR $RELEASE main restricted
- deb-src $MIRROR $RELEASE main restricted
- deb $PRIMARY $RELEASE universe restricted
- deb-src $PRIMARY $RELEASE universe restricted
- deb $SECURITY $RELEASE-security multiverse
- deb-src $SECURITY $RELEASE-security multiverse
-"""
-
-EXPECTED_REGEXES = [
- r"deb http://archive.ubuntu.com/ubuntu [a-z].* main restricted",
- r"deb-src http://archive.ubuntu.com/ubuntu [a-z].* main restricted",
- r"deb http://archive.ubuntu.com/ubuntu [a-z].* universe restricted",
- r"deb-src http://archive.ubuntu.com/ubuntu [a-z].* universe restricted",
- r"deb http://security.ubuntu.com/ubuntu [a-z].*security multiverse",
- r"deb-src http://security.ubuntu.com/ubuntu [a-z].*security multiverse",
-]
-
-
-@pytest.mark.ci
-class TestAptConfigureSourcesList:
-
- @pytest.mark.user_data(USER_DATA)
- def test_sources_list(self, client):
- sources_list = client.read_from_file("/etc/apt/sources.list")
- assert 6 == len(sources_list.rstrip().split('\n'))
-
- for expected_re in EXPECTED_REGEXES:
- assert re.search(expected_re, sources_list) is not None
diff --git a/tests/integration_tests/modules/test_ca_certs.py b/tests/integration_tests/modules/test_ca_certs.py
new file mode 100644
index 00000000..7247fd7d
--- /dev/null
+++ b/tests/integration_tests/modules/test_ca_certs.py
@@ -0,0 +1,90 @@
+"""Integration tests for cc_ca_certs.
+
+(This is ported from ``tests/cloud_tests//testcases/modules/ca_certs.yaml``.)
+
+TODO:
+* Mark this as running on Debian and Alpine (once we have marks for that)
+* Implement testing for the RHEL-specific paths
+"""
+import os.path
+
+import pytest
+
+USER_DATA = """\
+#cloud-config
+ca_certs:
+ remove_defaults: true
+ trusted:
+ - |
+ -----BEGIN CERTIFICATE-----
+ MIIGJzCCBA+gAwIBAgIBATANBgkqhkiG9w0BAQUFADCBsjELMAkGA1UEBhMCRlIx
+ DzANBgNVBAgMBkFsc2FjZTETMBEGA1UEBwwKU3RyYXNib3VyZzEYMBYGA1UECgwP
+ d3d3LmZyZWVsYW4ub3JnMRAwDgYDVQQLDAdmcmVlbGFuMS0wKwYDVQQDDCRGcmVl
+ bGFuIFNhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxIjAgBgkqhkiG9w0BCQEW
+ E2NvbnRhY3RAZnJlZWxhbi5vcmcwHhcNMTIwNDI3MTAzMTE4WhcNMjIwNDI1MTAz
+ MTE4WjB+MQswCQYDVQQGEwJGUjEPMA0GA1UECAwGQWxzYWNlMRgwFgYDVQQKDA93
+ d3cuZnJlZWxhbi5vcmcxEDAOBgNVBAsMB2ZyZWVsYW4xDjAMBgNVBAMMBWFsaWNl
+ MSIwIAYJKoZIhvcNAQkBFhNjb250YWN0QGZyZWVsYW4ub3JnMIICIjANBgkqhkiG
+ 9w0BAQEFAAOCAg8AMIICCgKCAgEA3W29+ID6194bH6ejLrIC4hb2Ugo8v6ZC+Mrc
+ k2dNYMNPjcOKABvxxEtBamnSaeU/IY7FC/giN622LEtV/3oDcrua0+yWuVafyxmZ
+ yTKUb4/GUgafRQPf/eiX9urWurtIK7XgNGFNUjYPq4dSJQPPhwCHE/LKAykWnZBX
+ RrX0Dq4XyApNku0IpjIjEXH+8ixE12wH8wt7DEvdO7T3N3CfUbaITl1qBX+Nm2Z6
+ q4Ag/u5rl8NJfXg71ZmXA3XOj7zFvpyapRIZcPmkvZYn7SMCp8dXyXHPdpSiIWL2
+ uB3KiO4JrUYvt2GzLBUThp+lNSZaZ/Q3yOaAAUkOx+1h08285Pi+P8lO+H2Xic4S
+ vMq1xtLg2bNoPC5KnbRfuFPuUD2/3dSiiragJ6uYDLOyWJDivKGt/72OVTEPAL9o
+ 6T2pGZrwbQuiFGrGTMZOvWMSpQtNl+tCCXlT4mWqJDRwuMGrI4DnnGzt3IKqNwS4
+ Qyo9KqjMIPwnXZAmWPm3FOKe4sFwc5fpawKO01JZewDsYTDxVj+cwXwFxbE2yBiF
+ z2FAHwfopwaH35p3C6lkcgP2k/zgAlnBluzACUI+MKJ/G0gv/uAhj1OHJQ3L6kn1
+ SpvQ41/ueBjlunExqQSYD7GtZ1Kg8uOcq2r+WISE3Qc9MpQFFkUVllmgWGwYDuN3
+ Zsez95kCAwEAAaN7MHkwCQYDVR0TBAIwADAsBglghkgBhvhCAQ0EHxYdT3BlblNT
+ TCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwHQYDVR0OBBYEFFlfyRO6G8y5qEFKikl5
+ ajb2fT7XMB8GA1UdIwQYMBaAFCNsLT0+KV14uGw+quK7Lh5sh/JTMA0GCSqGSIb3
+ DQEBBQUAA4ICAQAT5wJFPqervbja5+90iKxi1d0QVtVGB+z6aoAMuWK+qgi0vgvr
+ mu9ot2lvTSCSnRhjeiP0SIdqFMORmBtOCFk/kYDp9M/91b+vS+S9eAlxrNCB5VOf
+ PqxEPp/wv1rBcE4GBO/c6HcFon3F+oBYCsUQbZDKSSZxhDm3mj7pb67FNbZbJIzJ
+ 70HDsRe2O04oiTx+h6g6pW3cOQMgIAvFgKN5Ex727K4230B0NIdGkzuj4KSML0NM
+ slSAcXZ41OoSKNjy44BVEZv0ZdxTDrRM4EwJtNyggFzmtTuV02nkUj1bYYYC5f0L
+ ADr6s0XMyaNk8twlWYlYDZ5uKDpVRVBfiGcq0uJIzIvemhuTrofh8pBQQNkPRDFT
+ Rq1iTo1Ihhl3/Fl1kXk1WR3jTjNb4jHX7lIoXwpwp767HAPKGhjQ9cFbnHMEtkro
+ RlJYdtRq5mccDtwT0GFyoJLLBZdHHMHJz0F9H7FNk2tTQQMhK5MVYwg+LIaee586
+ CQVqfbscp7evlgjLW98H+5zylRHAgoH2G79aHljNKMp9BOuq6SnEglEsiWGVtu2l
+ hnx8SB3sVJZHeer8f/UQQwqbAO+Kdy70NmbSaqaVtp8jOxLiidWkwSyRTsuU6D8i
+ DiH5uEqBXExjrj0FslxcVKdVj5glVcSmkLwZKbEU1OKwleT/iXFhvooWhQ==
+ -----END CERTIFICATE-----
+"""
+
+
+@pytest.mark.ubuntu
+@pytest.mark.user_data(USER_DATA)
+class TestCaCerts:
+ def test_certs_updated(self, class_client):
+ """Test that /etc/ssl/certs is updated as we expect."""
+ root = "/etc/ssl/certs"
+ filenames = class_client.execute(["ls", "-1", root]).splitlines()
+ unlinked_files = []
+ links = {}
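+ # Classify each entry in /etc/ssl/certs as either a symlink (recording
+ # its target) or a regular file.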
+ for filename in filenames:
+ full_path = os.path.join(root, filename)
+ symlink_target = class_client.execute(["readlink", full_path])
+ is_symlink = symlink_target.ok
+ if is_symlink:
+ links[filename] = symlink_target
+ else:
+ unlinked_files.append(filename)
+
+ assert ["ca-certificates.crt"] == unlinked_files
+ assert "cloud-init-ca-certs.pem" == links["a535c1f3.0"]
+ assert (
+ "/usr/share/ca-certificates/cloud-init-ca-certs.crt"
+ == links["cloud-init-ca-certs.pem"]
+ )
+
+ def test_cert_installed(self, class_client):
+ """Test that our specified cert has been installed"""
+ checksum = class_client.execute(
+ "sha256sum /etc/ssl/certs/ca-certificates.crt"
+ )
+ assert (
+ "78e875f18c73c1aab9167ae0bd323391e52222cc2dbcda42d129537219300062"
+ in checksum
+ )
diff --git a/tests/integration_tests/modules/test_cli.py b/tests/integration_tests/modules/test_cli.py
new file mode 100644
index 00000000..baaa7567
--- /dev/null
+++ b/tests/integration_tests/modules/test_cli.py
@@ -0,0 +1,81 @@
+"""Integration tests for CLI functionality
+
+These would be for behavior manually invoked by user from the command line
+"""
+
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+
+VALID_USER_DATA = """\
+#cloud-config
+runcmd:
+ - echo 'hi' > /var/tmp/test
+"""
+
+INVALID_USER_DATA_HEADER = """\
+runcmd:
+ - echo 'hi' > /var/tmp/test
+"""
+
+INVALID_USER_DATA_SCHEMA = """\
+#cloud-config
+updates:
+ notnetwork: -1
+apt_pipelining: bogus
+"""
+
+
+@pytest.mark.user_data(VALID_USER_DATA)
+def test_valid_userdata(client: IntegrationInstance):
+ """Test `cloud-init devel schema` with valid userdata.
+
+ PR #575
+ """
+ result = client.execute("cloud-init devel schema --system")
+ assert result.ok
+ assert "Valid cloud-config: system userdata" == result.stdout.strip()
+ result = client.execute("cloud-init status --long")
+ if not result.ok:
+ raise AssertionError(
+ f"Unexpected error from cloud-init status: {result}"
+ )
+
+
+@pytest.mark.user_data(INVALID_USER_DATA_HEADER)
+def test_invalid_userdata(client: IntegrationInstance):
+ """Test `cloud-init devel schema` with invalid userdata.
+
+ PR #575
+ """
+ result = client.execute("cloud-init devel schema --system")
+ assert not result.ok
+ assert "Cloud config schema errors" in result.stderr
+ assert 'needs to begin with "#cloud-config"' in result.stderr
+ result = client.execute("cloud-init status --long")
+ if not result.ok:
+ raise AssertionError(
+ f"Unexpected error from cloud-init status: {result}"
+ )
+
+
+@pytest.mark.user_data(INVALID_USER_DATA_SCHEMA)
+def test_invalid_userdata_schema(client: IntegrationInstance):
+ """Test invalid schema represented as Warnings, not fatal
+
+ PR #1175
+ """
+ result = client.execute("cloud-init status --long")
+ assert result.ok
+ log = client.read_from_file("/var/log/cloud-init.log")
+ warning = (
+ "[WARNING]: Invalid cloud-config provided:\napt_pipelining: 'bogus'"
+ " is not valid under any of the given schemas\nupdates: Additional"
+ " properties are not allowed ('notnetwork' was unexpected)"
+ )
+ assert warning in log
+ result = client.execute("cloud-init status --long")
+ if not result.ok:
+ raise AssertionError(
+ f"Unexpected error from cloud-init status: {result}"
+ )
diff --git a/tests/integration_tests/modules/test_combined.py b/tests/integration_tests/modules/test_combined.py
new file mode 100644
index 00000000..7a9a6e27
--- /dev/null
+++ b/tests/integration_tests/modules/test_combined.py
@@ -0,0 +1,342 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+"""A set of somewhat unrelated tests that can be combined into a single
+instance launch. Generally tests should only be added here if a failure
+of the test would be unlikely to affect the running of another test using
+the same instance launch. Most independent module coherence tests can go
+here.
+"""
+import json
+import re
+
+import pytest
+
+from tests.integration_tests.clouds import ImageSpecification
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import (
+ retry,
+ verify_clean_log,
+ verify_ordered_items_in_text,
+)
+
+USER_DATA = """\
+#cloud-config
+apt:
+ primary:
+ - arches: [default]
+ uri: http://us.archive.ubuntu.com/ubuntu/
+byobu_by_default: enable
+final_message: |
+ This is my final message!
+ $version
+ $timestamp
+ $datasource
+ $uptime
+locale: en_GB.UTF-8
+locale_configfile: /etc/default/locale
+ntp:
+ servers: ['ntp.ubuntu.com']
+package_update: true
+random_seed:
+ data: 'MYUb34023nD:LFDK10913jk;dfnk:Df'
+ encoding: raw
+ file: /root/seed
+rsyslog:
+ configs:
+ - "*.* @@127.0.0.1"
+ - filename: 0-basic-config.conf
+ content: |
+ module(load="imtcp")
+ input(type="imtcp" port="514")
+ $template RemoteLogs,"/var/tmp/rsyslog.log"
+ *.* ?RemoteLogs
+ & ~
+ remotes:
+ me: "127.0.0.1"
+runcmd:
+ - echo 'hello world' > /var/tmp/runcmd_output
+
+ - #
+ - logger "My test log"
+snap:
+ squashfuse_in_container: true
+ commands:
+ - snap install hello-world
+ssh_import_id:
+ - gh:powersj
+ - lp:smoser
+timezone: US/Aleutian
+"""
+
+
+@pytest.mark.ci
+@pytest.mark.user_data(USER_DATA)
+class TestCombined:
+ def test_final_message(self, class_client: IntegrationInstance):
+ """Test that final_message module works as expected.
+
+ Also tests LP 1511485: final_message is silent.
+ """
+ client = class_client
+ log = client.read_from_file("/var/log/cloud-init.log")
+ expected = (
+ "This is my final message!\n"
+ r"\d+\.\d+.*\n"
+ r"\w{3}, \d{2} \w{3} \d{4} \d{2}:\d{2}:\d{2} \+\d{4}\n" # Datetime
+ "DataSource.*\n"
+ r"\d+\.\d+"
+ )
+
+ assert re.search(expected, log)
+
+ def test_ntp_with_apt(self, class_client: IntegrationInstance):
+ """LP #1628337.
+
+ cloud-init tries to install NTP before even
+ configuring the archives.
+ """
+ client = class_client
+ log = client.read_from_file("/var/log/cloud-init.log")
+ assert "W: Failed to fetch" not in log
+ assert "W: Some index files failed to download" not in log
+ assert "E: Unable to locate package ntp" not in log
+
+ def test_byobu(self, class_client: IntegrationInstance):
+ """Test byobu configured as enabled by default."""
+ client = class_client
+ assert client.execute('test -e "/etc/byobu/autolaunch"').ok
+
+ def test_configured_locale(self, class_client: IntegrationInstance):
+ """Test locale can be configured correctly."""
+ client = class_client
+ default_locale = client.read_from_file("/etc/default/locale")
+ assert "LANG=en_GB.UTF-8" in default_locale
+
+ locale_a = client.execute("locale -a")
+ verify_ordered_items_in_text(["en_GB.utf8", "en_US.utf8"], locale_a)
+
+ locale_gen = client.execute(
+ "cat /etc/locale.gen | grep -v '^#' | uniq"
+ )
+ verify_ordered_items_in_text(
+ ["en_GB.UTF-8", "en_US.UTF-8"], locale_gen
+ )
+
+ def test_random_seed_data(self, class_client: IntegrationInstance):
+ """Integration test for the random seed module.
+
+ This test provides known seed data to the ``seed_random`` module and
+ then checks that the data was written to the configured seed file.
+ """
+ client = class_client
+
+ # Only read the first 31 characters, because the rest could be
+ # binary data
+ result = client.execute("head -c 31 < /root/seed")
+ assert result.startswith("MYUb34023nD:LFDK10913jk;dfnk:Df")
+
+ def test_rsyslog(self, class_client: IntegrationInstance):
+ """Test rsyslog is configured correctly."""
+ client = class_client
+ assert "My test log" in client.read_from_file("/var/tmp/rsyslog.log")
+
+ def test_runcmd(self, class_client: IntegrationInstance):
+ """Test runcmd works as expected"""
+ client = class_client
+ assert "hello world" == client.read_from_file("/var/tmp/runcmd_output")
+
+ @retry(tries=30, delay=1)
+ def test_ssh_import_id(self, class_client: IntegrationInstance):
+ """Integration test for the ssh_import_id module.
+
+ This test specifies ssh keys to be imported by the ``ssh_import_id``
+ module and then checks that the ssh keys were successfully imported.
+
+ TODO:
+ * This test assumes that SSH keys will be imported into the
+ /home/ubuntu; this will need modification to run on other OSes.
+ """
+ client = class_client
+ ssh_output = client.read_from_file("/home/ubuntu/.ssh/authorized_keys")
+
+ assert "# ssh-import-id gh:powersj" in ssh_output
+ assert "# ssh-import-id lp:smoser" in ssh_output
+
+ def test_snap(self, class_client: IntegrationInstance):
+ """Integration test for the snap module.
+
+ This test specifies a command to be executed by the ``snap`` module
+ and then checks that the command was executed during boot.
+ """
+ client = class_client
+ snap_output = client.execute("snap list")
+ assert "core " in snap_output
+ assert "hello-world " in snap_output
+
+ def test_timezone(self, class_client: IntegrationInstance):
+ """Integration test for the timezone module.
+
+ This test specifies a timezone to be used by the ``timezone`` module
+ and then checks that the timezone was applied during boot.
+ """
+ client = class_client
+ timezone_output = client.execute(
+ 'date "+%Z" --date="Thu, 03 Nov 2016 00:47:00 -0400"'
+ )
+ assert timezone_output.strip() == "HDT"
+
+ def test_no_problems(self, class_client: IntegrationInstance):
+ """Test no errors, warnings, or tracebacks"""
+ client = class_client
+ status_file = client.read_from_file("/run/cloud-init/status.json")
+ status_json = json.loads(status_file)["v1"]
+ for stage in ("init", "init-local", "modules-config", "modules-final"):
+ assert status_json[stage]["errors"] == []
+ result_file = client.read_from_file("/run/cloud-init/result.json")
+ result_json = json.loads(result_file)["v1"]
+ assert result_json["errors"] == []
+
+ log = client.read_from_file("/var/log/cloud-init.log")
+ verify_clean_log(log)
+
+ def test_correct_datasource_detected(
+ self, class_client: IntegrationInstance
+ ):
+ """Test datasource is detected at the proper boot stage."""
+ client = class_client
+ status_file = client.read_from_file("/run/cloud-init/status.json")
+ parsed_datasource = json.loads(status_file)["v1"]["datasource"]
+
+ if client.settings.PLATFORM in ["lxd_container", "lxd_vm"]:
+ assert parsed_datasource.startswith("DataSourceNoCloud")
+ else:
+ platform_datasources = {
+ "azure": "DataSourceAzure [seed=/dev/sr0]",
+ "ec2": "DataSourceEc2Local",
+ "gce": "DataSourceGCELocal",
+ "oci": "DataSourceOracle",
+ "openstack": "DataSourceOpenStackLocal [net,ver=2]",
+ }
+ assert (
+ platform_datasources[client.settings.PLATFORM]
+ == parsed_datasource
+ )
+
+ def test_cloud_id_file_symlink(self, class_client: IntegrationInstance):
+ cloud_id = class_client.execute("cloud-id").stdout
+ expected_link_output = (
+ "'/run/cloud-init/cloud-id' -> "
+ f"'/run/cloud-init/cloud-id-{cloud_id}'"
+ )
+ assert expected_link_output == str(
+ class_client.execute("stat -c %N /run/cloud-init/cloud-id")
+ )
+
+ def _check_common_metadata(self, data):
+ assert data["base64_encoded_keys"] == []
+ assert data["merged_cfg"] == "redacted for non-root user"
+
+ image_spec = ImageSpecification.from_os_image()
+ assert data["sys_info"]["dist"][0] == image_spec.os
+
+ v1_data = data["v1"]
+ assert re.match(r"\d\.\d+\.\d+-\d+", v1_data["kernel_release"])
+ assert v1_data["variant"] == image_spec.os
+ assert v1_data["distro"] == image_spec.os
+ assert v1_data["distro_release"] == image_spec.release
+ assert v1_data["machine"] == "x86_64"
+ assert re.match(r"3.\d\.\d", v1_data["python_version"])
+
+ @pytest.mark.lxd_container
+ def test_instance_json_lxd(self, class_client: IntegrationInstance):
+ client = class_client
+ instance_json_file = client.read_from_file(
+ "/run/cloud-init/instance-data.json"
+ )
+
+ data = json.loads(instance_json_file)
+ self._check_common_metadata(data)
+ v1_data = data["v1"]
+ assert v1_data["cloud_name"] == "unknown"
+ assert v1_data["platform"] == "lxd"
+ assert v1_data["cloud_id"] == "lxd"
+ assert f"{v1_data['cloud_id']}" == client.read_from_file(
+ "/run/cloud-init/cloud-id-lxd"
+ )
+ assert (
+ v1_data["subplatform"]
+ == "seed-dir (/var/lib/cloud/seed/nocloud-net)"
+ )
+ assert v1_data["availability_zone"] is None
+ assert v1_data["instance_id"] == client.instance.name
+ assert v1_data["local_hostname"] == client.instance.name
+ assert v1_data["region"] is None
+
+ @pytest.mark.lxd_vm
+ def test_instance_json_lxd_vm(self, class_client: IntegrationInstance):
+ client = class_client
+ instance_json_file = client.read_from_file(
+ "/run/cloud-init/instance-data.json"
+ )
+
+ data = json.loads(instance_json_file)
+ self._check_common_metadata(data)
+ v1_data = data["v1"]
+ assert v1_data["cloud_name"] == "unknown"
+ assert v1_data["platform"] == "lxd"
+ assert v1_data["cloud_id"] == "lxd"
+ assert f"{v1_data['cloud_id']}" == client.read_from_file(
+ "/run/cloud-init/cloud-id-lxd"
+ )
+ assert any(
+ [
+ "/var/lib/cloud/seed/nocloud-net" in v1_data["subplatform"],
+ "/dev/sr0" in v1_data["subplatform"],
+ ]
+ )
+ assert v1_data["availability_zone"] is None
+ assert v1_data["instance_id"] == client.instance.name
+ assert v1_data["local_hostname"] == client.instance.name
+ assert v1_data["region"] is None
+
+ @pytest.mark.ec2
+ def test_instance_json_ec2(self, class_client: IntegrationInstance):
+ client = class_client
+ instance_json_file = client.read_from_file(
+ "/run/cloud-init/instance-data.json"
+ )
+ data = json.loads(instance_json_file)
+ v1_data = data["v1"]
+ assert v1_data["cloud_name"] == "aws"
+ assert v1_data["platform"] == "ec2"
+ # Different regions will show up as ec2-(gov|china)
+ assert v1_data["cloud_id"].startswith("ec2")
+ assert f"{v1_data['cloud_id']}" == client.read_from_file(
+ "/run/cloud-init/cloud-id-ec2"
+ )
+ assert v1_data["subplatform"].startswith("metadata")
+ assert (
+ v1_data["availability_zone"] == client.instance.availability_zone
+ )
+ assert v1_data["instance_id"] == client.instance.name
+ assert v1_data["local_hostname"].startswith("ip-")
+ assert v1_data["region"] == client.cloud.cloud_instance.region
+
+ @pytest.mark.gce
+ def test_instance_json_gce(self, class_client: IntegrationInstance):
+ client = class_client
+ instance_json_file = client.read_from_file(
+ "/run/cloud-init/instance-data.json"
+ )
+ data = json.loads(instance_json_file)
+ self._check_common_metadata(data)
+ v1_data = data["v1"]
+ assert v1_data["cloud_name"] == "gce"
+ assert v1_data["platform"] == "gce"
+ assert f"{v1_data['cloud_id']}" == client.read_from_file(
+ "/run/cloud-init/cloud-id-gce"
+ )
+ assert v1_data["subplatform"].startswith("metadata")
+ assert v1_data["availability_zone"] == client.instance.zone
+ assert v1_data["instance_id"] == client.instance.instance_id
+ assert v1_data["local_hostname"] == client.instance.name
diff --git a/tests/integration_tests/modules/test_command_output.py b/tests/integration_tests/modules/test_command_output.py
new file mode 100644
index 00000000..96525cac
--- /dev/null
+++ b/tests/integration_tests/modules/test_command_output.py
@@ -0,0 +1,21 @@
+"""Integration test for output redirection.
+
+This test redirects all cloud-init output to a file and then checks the file.
+
+(This is ported from
+``tests/cloud_tests/testcases/main/command_output_simple.yaml``.)"""
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+
+USER_DATA = """\
+#cloud-config
+output: { all: "| tee -a /var/log/cloud-init-test-output" }
+final_message: "should be last line in cloud-init-test-output file"
+"""
+
+
+@pytest.mark.user_data(USER_DATA)
+def test_command_output(client: IntegrationInstance):
+ log = client.read_from_file("/var/log/cloud-init-test-output")
+ assert "should be last line in cloud-init-test-output file" in log
diff --git a/tests/integration_tests/modules/test_disk_setup.py b/tests/integration_tests/modules/test_disk_setup.py
new file mode 100644
index 00000000..7aaba7db
--- /dev/null
+++ b/tests/integration_tests/modules/test_disk_setup.py
@@ -0,0 +1,212 @@
+import json
+import os
+from uuid import uuid4
+
+import pytest
+from pycloudlib.lxd.instance import LXDInstance
+
+from cloudinit.subp import subp
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import verify_clean_log
+
+DISK_PATH = "/tmp/test_disk_setup_{}".format(uuid4())
+
+
+def setup_and_mount_lxd_disk(instance: LXDInstance):
+ subp(
+ "lxc config device add {} test-disk-setup-disk disk source={}".format(
+ instance.name, DISK_PATH
+ ).split()
+ )
+
+
+@pytest.fixture
+def create_disk():
+ # 640k should be enough for anybody
+ subp("dd if=/dev/zero of={} bs=1k count=640".format(DISK_PATH).split())
+ yield
+ os.remove(DISK_PATH)
+
+
+ALIAS_USERDATA = """\
+#cloud-config
+device_aliases:
+ my_alias: /dev/sdb
+disk_setup:
+ my_alias:
+ table_type: mbr
+ layout: [50, 50]
+ overwrite: True
+fs_setup:
+- label: fs1
+ device: my_alias.1
+ filesystem: ext4
+- label: fs2
+ device: my_alias.2
+ filesystem: ext4
+mounts:
+- ["my_alias.1", "/mnt1"]
+- ["my_alias.2", "/mnt2"]
+"""
+
+
+@pytest.mark.user_data(ALIAS_USERDATA)
+@pytest.mark.lxd_setup.with_args(setup_and_mount_lxd_disk)
+@pytest.mark.ubuntu
+@pytest.mark.lxd_vm
+class TestDeviceAliases:
+ """Test devices aliases work on disk setup/mount"""
+
+ def test_device_alias(self, create_disk, client: IntegrationInstance):
+ log = client.read_from_file("/var/log/cloud-init.log")
+ assert (
+ "updated disk_setup device entry 'my_alias' to '/dev/sdb'" in log
+ )
+ assert "changed my_alias.1 => /dev/sdb1" in log
+ assert "changed my_alias.2 => /dev/sdb2" in log
+ verify_clean_log(log)
+
+ lsblk = json.loads(client.execute("lsblk --json"))
+ sdb = [x for x in lsblk["blockdevices"] if x["name"] == "sdb"][0]
+ assert len(sdb["children"]) == 2
+ assert sdb["children"][0]["name"] == "sdb1"
+ assert sdb["children"][1]["name"] == "sdb2"
+ if "mountpoint" in sdb["children"][0]:
+ assert sdb["children"][0]["mountpoint"] == "/mnt1"
+ assert sdb["children"][1]["mountpoint"] == "/mnt2"
+ else:
+ assert sdb["children"][0]["mountpoints"] == ["/mnt1"]
+ assert sdb["children"][1]["mountpoints"] == ["/mnt2"]
+ result = client.execute("mount -a")
+ assert result.return_code == 0
+ assert result.stdout.strip() == ""
+ assert result.stderr.strip() == ""
+ result = client.execute("findmnt -J /mnt1")
+ assert result.return_code == 0
+ result = client.execute("findmnt -J /mnt2")
+ assert result.return_code == 0
+
+
+PARTPROBE_USERDATA = """\
+#cloud-config
+disk_setup:
+ /dev/sdb:
+ table_type: mbr
+ layout: [50, 50]
+ overwrite: True
+fs_setup:
+ - label: test
+ device: /dev/sdb1
+ filesystem: ext4
+ - label: test2
+ device: /dev/sdb2
+ filesystem: ext4
+mounts:
+- ["/dev/sdb1", "/mnt1"]
+- ["/dev/sdb2", "/mnt2"]
+"""
+
+UPDATED_PARTPROBE_USERDATA = """\
+#cloud-config
+disk_setup:
+ /dev/sdb:
+ table_type: mbr
+ layout: [100]
+ overwrite: True
+fs_setup:
+ - label: test3
+ device: /dev/sdb1
+ filesystem: ext4
+mounts:
+- ["/dev/sdb1", "/mnt3"]
+"""
+
+
+@pytest.mark.user_data(PARTPROBE_USERDATA)
+@pytest.mark.lxd_setup.with_args(setup_and_mount_lxd_disk)
+@pytest.mark.ubuntu
+@pytest.mark.lxd_vm
+class TestPartProbeAvailability:
+ """Test disk setup works with partprobe
+
+ Disk setup can run successfully on a mounted partition when
+ partprobe is being used.
+
+ lp-1920939
+ """
+
+ def _verify_first_disk_setup(self, client, log):
+ verify_clean_log(log)
+ lsblk = json.loads(client.execute("lsblk --json"))
+ sdb = [x for x in lsblk["blockdevices"] if x["name"] == "sdb"][0]
+ assert len(sdb["children"]) == 2
+ assert sdb["children"][0]["name"] == "sdb1"
+ assert sdb["children"][1]["name"] == "sdb2"
+ if "mountpoint" in sdb["children"][0]:
+ assert sdb["children"][0]["mountpoint"] == "/mnt1"
+ assert sdb["children"][1]["mountpoint"] == "/mnt2"
+ else:
+ assert sdb["children"][0]["mountpoints"] == ["/mnt1"]
+ assert sdb["children"][1]["mountpoints"] == ["/mnt2"]
+
+ # Not bionic because the LXD agent gets in the way of us
+ # changing the userdata
+ @pytest.mark.not_bionic
+ def test_disk_setup_when_mounted(
+ self, create_disk, client: IntegrationInstance
+ ):
+ """Test lp-1920939.
+
+ We insert an extra disk into our VM, format it to have two partitions,
+ modify our cloud config to mount devices before disk setup, and modify
+ our userdata to setup a single partition on the disk.
+
+ This allows cloud-init to attempt disk setup on a mounted partition.
+ When blockdev is in use, it will fail with
+ "blockdev: ioctl error on BLKRRPART: Device or resource busy" along
+ with a warning and a traceback. When partprobe is in use, everything
+ should work successfully.
+ """
+ log = client.read_from_file("/var/log/cloud-init.log")
+ self._verify_first_disk_setup(client, log)
+
+ # Update our userdata and cloud.cfg to mount then perform new disk
+ # setup
+ client.write_to_file(
+ "/var/lib/cloud/seed/nocloud-net/user-data",
+ UPDATED_PARTPROBE_USERDATA,
+ )
+ client.execute(
+ "sed -i 's/write-files/write-files\\n - mounts/' "
+ "/etc/cloud/cloud.cfg"
+ )
+
+ client.execute("cloud-init clean --logs")
+ client.restart()
+
+ # Assert new setup works as expected
+ verify_clean_log(log)
+
+ lsblk = json.loads(client.execute("lsblk --json"))
+ sdb = [x for x in lsblk["blockdevices"] if x["name"] == "sdb"][0]
+ assert len(sdb["children"]) == 1
+ assert sdb["children"][0]["name"] == "sdb1"
+ if "mountpoint" in sdb["children"][0]:
+ assert sdb["children"][0]["mountpoint"] == "/mnt3"
+ else:
+ assert sdb["children"][0]["mountpoints"] == ["/mnt3"]
+
+ def test_disk_setup_no_partprobe(
+ self, create_disk, client: IntegrationInstance
+ ):
+ """Ensure disk setup still works as expected without partprobe."""
+ # We can't do this part in a bootcmd because the path has already
+ # been found by the time we get to the bootcmd
+ client.execute("rm $(which partprobe)")
+ client.execute("cloud-init clean --logs")
+ client.restart()
+
+ log = client.read_from_file("/var/log/cloud-init.log")
+ self._verify_first_disk_setup(client, log)
+
+ assert "partprobe" not in log
diff --git a/tests/integration_tests/modules/test_growpart.py b/tests/integration_tests/modules/test_growpart.py
new file mode 100644
index 00000000..67251817
--- /dev/null
+++ b/tests/integration_tests/modules/test_growpart.py
@@ -0,0 +1,68 @@
+import json
+import os
+import pathlib
+from uuid import uuid4
+
+import pytest
+from pycloudlib.lxd.instance import LXDInstance
+
+from cloudinit.subp import subp
+from tests.integration_tests.instances import IntegrationInstance
+
+DISK_PATH = "/tmp/test_disk_setup_{}".format(uuid4())
+
+
+def setup_and_mount_lxd_disk(instance: LXDInstance):
+ subp(
+ "lxc config device add {} test-disk-setup-disk disk source={}".format(
+ instance.name, DISK_PATH
+ ).split()
+ )
+
+
+@pytest.fixture(scope="class", autouse=True)
+def create_disk():
+ """Create 16M sparse file"""
+ pathlib.Path(DISK_PATH).touch()
+ os.truncate(DISK_PATH, 1 << 24)
+ yield
+ os.remove(DISK_PATH)
+
+
+# Create undersized partition in bootcmd
+ALIAS_USERDATA = """\
+#cloud-config
+bootcmd:
+ - parted /dev/sdb --script \
+ mklabel gpt \
+ mkpart primary 0 1MiB
+ - parted /dev/sdb --script print
+growpart:
+ devices:
+ - "/"
+ - "/dev/sdb1"
+runcmd:
+ - parted /dev/sdb --script print
+"""
+
+
+@pytest.mark.user_data(ALIAS_USERDATA)
+@pytest.mark.lxd_setup.with_args(setup_and_mount_lxd_disk)
+@pytest.mark.ubuntu
+@pytest.mark.lxd_vm
+class TestGrowPart:
+ """Test growpart"""
+
+ def test_grow_part(self, client: IntegrationInstance):
+ """Verify"""
+ log = client.read_from_file("/var/log/cloud-init.log")
+ assert (
+ "cc_growpart.py[INFO]: '/dev/sdb1' resized:"
+ " changed (/dev/sdb, 1) from" in log
+ )
+
+ lsblk = json.loads(client.execute("lsblk --json"))
+ sdb = [x for x in lsblk["blockdevices"] if x["name"] == "sdb"][0]
+ assert len(sdb["children"]) == 1
+ assert sdb["children"][0]["name"] == "sdb1"
+ assert sdb["size"] == "16M"
diff --git a/tests/integration_tests/modules/test_hotplug.py b/tests/integration_tests/modules/test_hotplug.py
new file mode 100644
index 00000000..0bad761e
--- /dev/null
+++ b/tests/integration_tests/modules/test_hotplug.py
@@ -0,0 +1,112 @@
+import time
+from collections import namedtuple
+
+import pytest
+import yaml
+
+from tests.integration_tests.instances import IntegrationInstance
+
+USER_DATA = """\
+#cloud-config
+updates:
+ network:
+ when: ['hotplug']
+"""
+
+ip_addr = namedtuple("ip_addr", "interface state ip4 ip6")
+
+
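+# Poll the cloud-init log for up to 60 seconds until the hotplug handler has
+# finished the expected number of times.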
+def _wait_till_hotplug_complete(client, expected_runs=1):
+ for _ in range(60):
+ log = client.read_from_file("/var/log/cloud-init.log")
+ if log.count("Exiting hotplug handler") == expected_runs:
+ return log
+ time.sleep(1)
+ raise Exception("Waiting for hotplug handler failed")
+
+
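+# Parse `ip --brief addr` output into (interface, state, ip4, ip6) tuples.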
+def _get_ip_addr(client):
+ ips = []
+ lines = client.execute("ip --brief addr").split("\n")
+ for line in lines:
+ attributes = line.split()
+ interface, state = attributes[0], attributes[1]
+ ip4_cidr = attributes[2] if len(attributes) > 2 else None
+ ip6_cidr = attributes[3] if len(attributes) > 3 else None
+ ip4 = ip4_cidr.split("/")[0] if ip4_cidr else None
+ ip6 = ip6_cidr.split("/")[0] if ip6_cidr else None
+ ip = ip_addr(interface, state, ip4, ip6)
+ ips.append(ip)
+ return ips
+
+
+@pytest.mark.openstack
+# On Bionic, we traceback when attempting to detect the hotplugged
+# device in the updated metadata. This is because Bionic is specifically
+# configured not to provide network metadata.
+@pytest.mark.not_bionic
+@pytest.mark.user_data(USER_DATA)
+def test_hotplug_add_remove(client: IntegrationInstance):
+ ips_before = _get_ip_addr(client)
+ log = client.read_from_file("/var/log/cloud-init.log")
+ assert "Exiting hotplug handler" not in log
+ assert client.execute(
+ "test -f /etc/udev/rules.d/10-cloud-init-hook-hotplug.rules"
+ ).ok
+
+ # Add new NIC
+ added_ip = client.instance.add_network_interface()
+ _wait_till_hotplug_complete(client, expected_runs=1)
+ ips_after_add = _get_ip_addr(client)
+ new_addition = [ip for ip in ips_after_add if ip.ip4 == added_ip][0]
+
+ assert len(ips_after_add) == len(ips_before) + 1
+ assert added_ip not in [ip.ip4 for ip in ips_before]
+ assert added_ip in [ip.ip4 for ip in ips_after_add]
+ assert new_addition.state == "UP"
+
+ netplan_cfg = client.read_from_file("/etc/netplan/50-cloud-init.yaml")
+ config = yaml.safe_load(netplan_cfg)
+ assert new_addition.interface in config["network"]["ethernets"]
+
+ # Remove new NIC
+ client.instance.remove_network_interface(added_ip)
+ _wait_till_hotplug_complete(client, expected_runs=2)
+ ips_after_remove = _get_ip_addr(client)
+ assert len(ips_after_remove) == len(ips_before)
+ assert added_ip not in [ip.ip4 for ip in ips_after_remove]
+
+ netplan_cfg = client.read_from_file("/etc/netplan/50-cloud-init.yaml")
+ config = yaml.safe_load(netplan_cfg)
+ assert new_addition.interface not in config["network"]["ethernets"]
+
+ assert "enabled" == client.execute(
+ "cloud-init devel hotplug-hook -s net query"
+ )
+
+
+@pytest.mark.openstack
+def test_no_hotplug_in_userdata(client: IntegrationInstance):
+ ips_before = _get_ip_addr(client)
+ log = client.read_from_file("/var/log/cloud-init.log")
+ assert "Exiting hotplug handler" not in log
+ assert client.execute(
+ "test -f /etc/udev/rules.d/10-cloud-init-hook-hotplug.rules"
+ ).failed
+
+ # Add new NIC
+ client.instance.add_network_interface()
+ log = client.read_from_file("/var/log/cloud-init.log")
+ assert "hotplug-hook" not in log
+
+ ips_after_add = _get_ip_addr(client)
+ if len(ips_after_add) == len(ips_before) + 1:
+ # We can see the device, but it should not have been brought up
+ new_ip = [ip for ip in ips_after_add if ip not in ips_before][0]
+ assert new_ip.state == "DOWN"
+ else:
+ assert len(ips_after_add) == len(ips_before)
+
+ assert "disabled" == client.execute(
+ "cloud-init devel hotplug-hook -s net query"
+ )
diff --git a/tests/integration_tests/modules/test_jinja_templating.py b/tests/integration_tests/modules/test_jinja_templating.py
new file mode 100644
index 00000000..7788c6f0
--- /dev/null
+++ b/tests/integration_tests/modules/test_jinja_templating.py
@@ -0,0 +1,33 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import verify_ordered_items_in_text
+
+USER_DATA = """\
+## template: jinja
+#cloud-config
+runcmd:
+ - echo {{v1.local_hostname}} > /var/tmp/runcmd_output
+ - echo {{merged_cfg._doc}} >> /var/tmp/runcmd_output
+ - echo {{v1['local-hostname']}} >> /var/tmp/runcmd_output
+"""
+
+
+@pytest.mark.user_data(USER_DATA)
+def test_runcmd_with_variable_substitution(client: IntegrationInstance):
+ """Test jinja substitution.
+
+ Ensure underscore-delimited aliases exist for hyphenated keys and that
+ variables from instance-data-sensitive can also be substituted
+ (LP: #1931392).
+ """
+ hostname = client.execute("hostname").stdout.strip()
+ expected = [
+ hostname,
+ "Merged cloud-init system config from /etc/cloud/cloud.cfg and "
+ "/etc/cloud/cloud.cfg.d/",
+ hostname,
+ ]
+ output = client.read_from_file("/var/tmp/runcmd_output")
+ verify_ordered_items_in_text(expected, output)
diff --git a/tests/integration_tests/modules/test_keyboard.py b/tests/integration_tests/modules/test_keyboard.py
new file mode 100644
index 00000000..7db35014
--- /dev/null
+++ b/tests/integration_tests/modules/test_keyboard.py
@@ -0,0 +1,17 @@
+import pytest
+
+USER_DATA = """\
+#cloud-config
+keyboard:
+ layout: de
+ model: pc105
+ variant: nodeadkeys
+ options: compose:rwin
+"""
+
+
+class TestKeyboard:
+ @pytest.mark.user_data(USER_DATA)
+ def test_keyboard(self, client):
+ lc = client.execute("localectl")
+ assert "X11 Layout: de" in lc
diff --git a/tests/integration_tests/modules/test_keys_to_console.py b/tests/integration_tests/modules/test_keys_to_console.py
new file mode 100644
index 00000000..50899982
--- /dev/null
+++ b/tests/integration_tests/modules/test_keys_to_console.py
@@ -0,0 +1,113 @@
+"""Integration tests for the cc_keys_to_console module.
+
+(This is ported from
+``tests/cloud_tests/testcases/modules/keys_to_console.yaml``.)"""
+import pytest
+
+from tests.integration_tests.util import retry
+
+BLACKLIST_USER_DATA = """\
+#cloud-config
+ssh_fp_console_blacklist: [ssh-dss, ssh-dsa, ecdsa-sha2-nistp256]
+ssh_key_console_blacklist: [ssh-dss, ssh-dsa, ecdsa-sha2-nistp256]
+"""
+
+BLACKLIST_ALL_KEYS_USER_DATA = """\
+#cloud-config
+ssh_fp_console_blacklist: [ssh-dsa, ssh-ecdsa, ssh-ed25519, ssh-rsa, ssh-dss, ecdsa-sha2-nistp256]
+""" # noqa: E501
+
+DISABLED_USER_DATA = """\
+#cloud-config
+ssh:
+ emit_keys_to_console: false
+"""
+
+ENABLE_KEYS_TO_CONSOLE_USER_DATA = """\
+#cloud-config
+ssh:
+ emit_keys_to_console: true
+users:
+ - default
+ - name: barfoo
+"""
+
+
+@pytest.mark.user_data(BLACKLIST_USER_DATA)
+class TestKeysToConsoleBlacklist:
+ """Test that the blacklist options work as expected."""
+
+ @pytest.mark.parametrize("key_type", ["DSA", "ECDSA"])
+ def test_excluded_keys(self, class_client, key_type):
+ syslog = class_client.read_from_file("/var/log/syslog")
+ assert "({})".format(key_type) not in syslog
+
+ # retry decorator here because it can take some time to be reflected
+ # in syslog
+ @retry(tries=30, delay=1)
+ @pytest.mark.parametrize("key_type", ["ED25519", "RSA"])
+ def test_included_keys(self, class_client, key_type):
+ syslog = class_client.read_from_file("/var/log/syslog")
+ assert "({})".format(key_type) in syslog
+
+
+@pytest.mark.user_data(BLACKLIST_ALL_KEYS_USER_DATA)
+class TestAllKeysToConsoleBlacklist:
+ """Test that when key blacklist contains all key types that
+ no header/footer are output.
+ """
+
+ def test_header_excluded(self, class_client):
+ syslog = class_client.read_from_file("/var/log/syslog")
+ assert "BEGIN SSH HOST KEY FINGERPRINTS" not in syslog
+
+ def test_footer_excluded(self, class_client):
+ syslog = class_client.read_from_file("/var/log/syslog")
+ assert "END SSH HOST KEY FINGERPRINTS" not in syslog
+
+
+@pytest.mark.user_data(DISABLED_USER_DATA)
+class TestKeysToConsoleDisabled:
+ """Test that output can be fully disabled."""
+
+ @pytest.mark.parametrize("key_type", ["DSA", "ECDSA", "ED25519", "RSA"])
+ def test_keys_excluded(self, class_client, key_type):
+ syslog = class_client.read_from_file("/var/log/syslog")
+ assert "({})".format(key_type) not in syslog
+
+ def test_header_excluded(self, class_client):
+ syslog = class_client.read_from_file("/var/log/syslog")
+ assert "BEGIN SSH HOST KEY FINGERPRINTS" not in syslog
+
+ def test_footer_excluded(self, class_client):
+ syslog = class_client.read_from_file("/var/log/syslog")
+ assert "END SSH HOST KEY FINGERPRINTS" not in syslog
+
+
+@pytest.mark.user_data(ENABLE_KEYS_TO_CONSOLE_USER_DATA)
+@pytest.mark.ec2
+@pytest.mark.lxd_container
+@pytest.mark.oci
+@pytest.mark.openstack
+class TestKeysToConsoleEnabled:
+ """Test that output can be enabled disabled."""
+
+ def test_duplicate_messaging_console_log(self, class_client):
+ class_client.execute("cloud-init status --wait --long").ok
+ try:
+ console_log = class_client.instance.console_log()
+ except NotImplementedError:
+ # Assume that an exception here means that we can't use the console
+ # log
+ pytest.skip("NotImplementedError when requesting console log")
+ return
+ if console_log.lower() == "no console output":
+ # This test retries because we might not have the full console log
+ # on the first fetch. However, if we have no console output
+ # at all, we don't want to keep retrying as that would trigger
+ # another 5 minute wait on the pycloudlib side, which could
+ # leave us waiting for a couple hours
+ pytest.fail("no console output")
+ return
+ msg = "no authorized SSH keys fingerprints found for user barfoo."
+ assert 1 == console_log.count(msg)
diff --git a/tests/integration_tests/modules/test_lxd_bridge.py b/tests/integration_tests/modules/test_lxd_bridge.py
new file mode 100644
index 00000000..3292a833
--- /dev/null
+++ b/tests/integration_tests/modules/test_lxd_bridge.py
@@ -0,0 +1,46 @@
+"""Integration tests for LXD bridge creation.
+
+(This is ported from
+``tests/cloud_tests/testcases/modules/lxd_bridge.yaml``.)
+"""
+import pytest
+import yaml
+
+from tests.integration_tests.util import verify_clean_log
+
+USER_DATA = """\
+#cloud-config
+lxd:
+ init:
+ storage_backend: dir
+ bridge:
+ mode: new
+ name: lxdbr0
+ ipv4_address: 10.100.100.1
+ ipv4_netmask: 24
+ ipv4_dhcp_first: 10.100.100.100
+ ipv4_dhcp_last: 10.100.100.200
+ ipv4_nat: true
+ domain: lxd
+"""
+
+
+@pytest.mark.no_container
+@pytest.mark.user_data(USER_DATA)
+class TestLxdBridge:
+ @pytest.mark.parametrize("binary_name", ["lxc", "lxd"])
+ def test_binaries_installed(self, class_client, binary_name):
+ """Check that the expected LXD binaries are installed"""
+ assert class_client.execute(["which", binary_name]).ok
+
+ def test_bridge(self, class_client):
+ """Check that the given bridge is configured"""
+ cloud_init_log = class_client.read_from_file("/var/log/cloud-init.log")
+ verify_clean_log(cloud_init_log)
+
+ # The bridge should exist
+ assert class_client.execute("ip addr show lxdbr0")
+
+ raw_network_config = class_client.execute("lxc network show lxdbr0")
+ network_config = yaml.safe_load(raw_network_config)
+ assert "10.100.100.1/24" == network_config["config"]["ipv4.address"]
diff --git a/tests/integration_tests/modules/test_ntp_servers.py b/tests/integration_tests/modules/test_ntp_servers.py
index e72389c1..fc62e63b 100644
--- a/tests/integration_tests/modules/test_ntp_servers.py
+++ b/tests/integration_tests/modules/test_ntp_servers.py
@@ -1,14 +1,18 @@
-"""Integration test for the ntp module's ``servers`` functionality with ntp.
+"""Integration test for the ntp module's ntp functionality.
This test specifies the use of the `ntp` NTP client, and ensures that the given
NTP servers are configured as expected.
-(This is ported from ``tests/cloud_tests/testcases/modules/ntp_servers.yaml``.)
+(This is ported from ``tests/cloud_tests/testcases/modules/ntp_servers.yaml``,
+``tests/cloud_tests/testcases/modules/ntp_pools.yaml``,
+and ``tests/cloud_tests/testcases/modules/ntp_chrony.yaml``)
"""
import re
-import yaml
import pytest
+import yaml
+
+from tests.integration_tests.instances import IntegrationInstance
USER_DATA = """\
#cloud-config
@@ -17,21 +21,25 @@ ntp:
servers:
- 172.16.15.14
- 172.16.17.18
+ pools:
+ - 0.cloud-init.mypool
+ - 1.cloud-init.mypool
+ - 172.16.15.15
"""
EXPECTED_SERVERS = yaml.safe_load(USER_DATA)["ntp"]["servers"]
+EXPECTED_POOLS = yaml.safe_load(USER_DATA)["ntp"]["pools"]
-@pytest.mark.ci
@pytest.mark.user_data(USER_DATA)
class TestNtpServers:
-
- def test_ntp_installed(self, class_client):
+ def test_ntp_installed(self, class_client: IntegrationInstance):
"""Test that `ntpd --version` succeeds, indicating installation."""
- result = class_client.execute("ntpd --version")
- assert 0 == result.return_code
+ assert class_client.execute("ntpd --version").ok
- def test_dist_config_file_is_empty(self, class_client):
+ def test_dist_config_file_is_empty(
+ self, class_client: IntegrationInstance
+ ):
"""Test that the distributed config file is empty.
(This test is skipped on all currently supported Ubuntu releases, so
@@ -42,17 +50,79 @@ class TestNtpServers:
dist_file = class_client.read_from_file("/etc/ntp.conf.dist")
assert 0 == len(dist_file.strip().splitlines())
- def test_ntp_entries(self, class_client):
+ def test_ntp_entries(self, class_client: IntegrationInstance):
ntp_conf = class_client.read_from_file("/etc/ntp.conf")
for expected_server in EXPECTED_SERVERS:
assert re.search(
r"^server {} iburst".format(expected_server),
ntp_conf,
- re.MULTILINE
+ re.MULTILINE,
+ )
+ for expected_pool in EXPECTED_POOLS:
+ assert re.search(
+ r"^pool {} iburst".format(expected_pool),
+ ntp_conf,
+ re.MULTILINE,
)
- def test_ntpq_servers(self, class_client):
+ def test_ntpq_servers(self, class_client: IntegrationInstance):
result = class_client.execute("ntpq -p -w -n")
assert result.ok
- for expected_server in EXPECTED_SERVERS:
- assert expected_server in result.stdout
+ for expected_server_or_pool in [*EXPECTED_SERVERS, *EXPECTED_POOLS]:
+ assert expected_server_or_pool in result.stdout
+
+
+CHRONY_DATA = """\
+#cloud-config
+ntp:
+ enabled: true
+ ntp_client: chrony
+ servers:
+ - 172.16.15.14
+"""
+
+
+@pytest.mark.user_data(CHRONY_DATA)
+def test_chrony(client: IntegrationInstance):
+ if client.execute("test -f /etc/chrony.conf").ok:
+ chrony_conf = "/etc/chrony.conf"
+ else:
+ chrony_conf = "/etc/chrony/chrony.conf"
+ contents = client.read_from_file(chrony_conf)
+ assert "server 172.16.15.14" in contents
+
+
+TIMESYNCD_DATA = """\
+#cloud-config
+ntp:
+ enabled: true
+ ntp_client: systemd-timesyncd
+ servers:
+ - 172.16.15.14
+"""
+
+
+@pytest.mark.user_data(TIMESYNCD_DATA)
+def test_timesyncd(client: IntegrationInstance):
+ contents = client.read_from_file(
+ "/etc/systemd/timesyncd.conf.d/cloud-init.conf"
+ )
+ assert "NTP=172.16.15.14" in contents
+
+
+EMPTY_NTP = """\
+#cloud-config
+ntp:
+ ntp_client: ntp
+ pools: []
+ servers: []
+"""
+
+
+@pytest.mark.user_data(EMPTY_NTP)
+def test_empty_ntp(client: IntegrationInstance):
+ assert client.execute("ntpd --version").ok
+ assert client.execute("test -f /etc/ntp.conf.dist").failed
+ assert "pool.ntp.org iburst" in client.execute(
+ 'grep -v "^#" /etc/ntp.conf'
+ )
diff --git a/tests/integration_tests/modules/test_package_update_upgrade_install.py b/tests/integration_tests/modules/test_package_update_upgrade_install.py
index 8a38ad84..d668d81c 100644
--- a/tests/integration_tests/modules/test_package_update_upgrade_install.py
+++ b/tests/integration_tests/modules/test_package_update_upgrade_install.py
@@ -13,8 +13,8 @@ NOTE: the testcase for this looks for the command in history.log as
"""
import re
-import pytest
+import pytest
USER_DATA = """\
#cloud-config
@@ -26,9 +26,9 @@ package_upgrade: true
"""
+@pytest.mark.ubuntu
@pytest.mark.user_data(USER_DATA)
class TestPackageUpdateUpgradeInstall:
-
def assert_package_installed(self, pkg_out, name, version=None):
"""Check dpkg-query --show output for matching package name.
@@ -37,7 +37,8 @@ class TestPackageUpdateUpgradeInstall:
version.
"""
pkg_match = re.search(
- "^%s\t(?P<version>.*)$" % name, pkg_out, re.MULTILINE)
+ "^%s\t(?P<version>.*)$" % name, pkg_out, re.MULTILINE
+ )
if pkg_match:
installed_version = pkg_match.group("version")
if not version:
@@ -45,8 +46,10 @@ class TestPackageUpdateUpgradeInstall:
if installed_version.startswith(version):
return # Success
raise AssertionError(
- "Expected package version %s-%s not found. Found %s" %
- name, version, installed_version)
+ "Expected package version %s-%s not found. Found %s" % name,
+ version,
+ installed_version,
+ )
raise AssertionError("Package not installed: %s" % name)
def test_new_packages_are_installed(self, class_client):
@@ -57,11 +60,13 @@ class TestPackageUpdateUpgradeInstall:
def test_packages_were_updated(self, class_client):
out = class_client.execute(
- "grep ^Commandline: /var/log/apt/history.log")
+ "grep ^Commandline: /var/log/apt/history.log"
+ )
assert (
"Commandline: /usr/bin/apt-get --option=Dpkg::Options"
"::=--force-confold --option=Dpkg::options::=--force-unsafe-io "
- "--assume-yes --quiet install sl tree") in out
+ "--assume-yes --quiet install sl tree" in out
+ )
def test_packages_were_upgraded(self, class_client):
"""Test cloud-init-output for install & upgrade stuff."""
diff --git a/tests/integration_tests/modules/test_persistence.py b/tests/integration_tests/modules/test_persistence.py
new file mode 100644
index 00000000..33527e1e
--- /dev/null
+++ b/tests/integration_tests/modules/test_persistence.py
@@ -0,0 +1,32 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+"""Test the behavior of loading/discarding pickle data"""
+from pathlib import Path
+
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import (
+ ASSETS_DIR,
+ verify_ordered_items_in_text,
+)
+
+PICKLE_PATH = Path("/var/lib/cloud/instance/obj.pkl")
+TEST_PICKLE = ASSETS_DIR / "trusty_with_mime.pkl"
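+# trusty_with_mime.pkl is a pre-generated datasource pickle shipped in the test
+# assets; pushing it simulates an instance upgraded from an older cloud-init.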
+
+
+@pytest.mark.lxd_container
+def test_log_message_on_missing_version_file(client: IntegrationInstance):
+ client.push_file(TEST_PICKLE, PICKLE_PATH)
+ client.restart()
+ assert client.execute("cloud-init status --wait").ok
+ log = client.read_from_file("/var/log/cloud-init.log")
+ verify_ordered_items_in_text(
+ [
+ "Unable to unpickle datasource: 'MIMEMultipart' object has no "
+ "attribute 'policy'. Ignoring current cache.",
+ "no cache found",
+ "Searching for local data source",
+ "SUCCESS: found local data from DataSourceNoCloud",
+ ],
+ log,
+ )
diff --git a/tests/integration_tests/modules/test_power_state_change.py b/tests/integration_tests/modules/test_power_state_change.py
new file mode 100644
index 00000000..5cd19764
--- /dev/null
+++ b/tests/integration_tests/modules/test_power_state_change.py
@@ -0,0 +1,97 @@
+"""Integration test of the cc_power_state_change module.
+
+Test that the power state config options work as expected.
+"""
+
+import time
+
+import pytest
+
+from tests.integration_tests.clouds import IntegrationCloud
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import verify_ordered_items_in_text
+
+USER_DATA = """\
+#cloud-config
+power_state:
+ delay: {delay}
+ mode: {mode}
+ message: msg
+ timeout: {timeout}
+ condition: {condition}
+"""
+
+
+def _detect_reboot(instance: IntegrationInstance):
+ # We'll wait for instance up here, but we don't know if we're
+ # detecting the first boot or second boot, so we also check
+ # the logs to ensure we've booted twice. If the logs show we've
+ # only booted once, wait until we've booted twice
+ instance.instance.wait()
+ for _ in range(600):
+ try:
+ log = instance.read_from_file("/var/log/cloud-init.log")
+ boot_count = log.count("running 'init-local'")
+ if boot_count == 1:
+ instance.instance.wait()
+ elif boot_count > 1:
+ break
+ except Exception:
+ pass
+ time.sleep(1)
+ else:
+ raise Exception("Could not detect reboot")
+
+
+def _can_connect(instance):
+ return instance.execute("true").ok
+
+
+# This test is marked unstable because even though it should be able to
+# run anywhere, I can only get it to run in an lxd container, and even then
+# occasionally some timing issues will crop up.
+@pytest.mark.unstable
+@pytest.mark.ubuntu
+@pytest.mark.lxd_container
+class TestPowerChange:
+ @pytest.mark.parametrize(
+ "mode,delay,timeout,expected",
+ [
+ ("poweroff", "now", "10", "will execute: shutdown -P now msg"),
+ ("reboot", "now", "0", "will execute: shutdown -r now msg"),
+ ("halt", "+1", "0", "will execute: shutdown -H +1 msg"),
+ ],
+ )
+ def test_poweroff(
+ self, session_cloud: IntegrationCloud, mode, delay, timeout, expected
+ ):
+ with session_cloud.launch(
+ user_data=USER_DATA.format(
+ delay=delay, mode=mode, timeout=timeout, condition="true"
+ ),
+ launch_kwargs={"wait": False},
+ ) as instance:
+ if mode == "reboot":
+ _detect_reboot(instance)
+ else:
+ instance.instance.wait_for_stop()
+ instance.instance.start(wait=True)
+ log = instance.read_from_file("/var/log/cloud-init.log")
+ assert _can_connect(instance)
+ lines_to_check = [
+ "Running module power-state-change",
+ expected,
+ "running 'init-local'",
+ "config-power-state-change already ran",
+ ]
+ verify_ordered_items_in_text(lines_to_check, log)
+
+ @pytest.mark.user_data(
+ USER_DATA.format(
+ delay="0", mode="poweroff", timeout="0", condition="false"
+ )
+ )
+ def test_poweroff_false_condition(self, client: IntegrationInstance):
+ log = client.read_from_file("/var/log/cloud-init.log")
+ assert _can_connect(client)
+ assert "Condition was false. Will not perform state change" in log
diff --git a/tests/integration_tests/modules/test_puppet.py b/tests/integration_tests/modules/test_puppet.py
new file mode 100644
index 00000000..1bd9cee4
--- /dev/null
+++ b/tests/integration_tests/modules/test_puppet.py
@@ -0,0 +1,39 @@
+"""Test installation configuration of puppet module."""
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import verify_clean_log
+
+SERVICE_DATA = """\
+#cloud-config
+puppet:
+ install: true
+ install_type: packages
+"""
+
+
+@pytest.mark.user_data(SERVICE_DATA)
+def test_puppet_service(client: IntegrationInstance):
+ """Basic test that puppet gets installed and runs."""
+ log = client.read_from_file("/var/log/cloud-init.log")
+ verify_clean_log(log)
+ assert client.execute("systemctl is-active puppet").ok
+ assert "Running command ['puppet', 'agent'" not in log
+
+
+EXEC_DATA = """\
+#cloud-config
+puppet:
+ install: true
+ install_type: packages
+ exec: true
+ exec_args: ['--noop']
+"""
+
+
+@pytest.mark.user_data(EXEC_DATA)
+def test_puppet_exec(client: IntegrationInstance):
+ """Basic test that puppet runs the agent with the configured exec_args."""
+ log = client.read_from_file("/var/log/cloud-init.log")
+ assert "Running command ['puppet', 'agent', '--noop']" in log
diff --git a/tests/integration_tests/modules/test_runcmd.py b/tests/integration_tests/modules/test_runcmd.py
deleted file mode 100644
index 50d1851e..00000000
--- a/tests/integration_tests/modules/test_runcmd.py
+++ /dev/null
@@ -1,25 +0,0 @@
-"""Integration test for the runcmd module.
-
-This test specifies a command to be executed by the ``runcmd`` module
-and then checks if that command was executed during boot.
-
-(This is ported from
-``tests/cloud_tests/testcases/modules/runcmd.yaml``.)"""
-
-import pytest
-
-
-USER_DATA = """\
-#cloud-config
-runcmd:
- - echo cloud-init run cmd test > /var/tmp/run_cmd
-"""
-
-
-@pytest.mark.ci
-class TestRuncmd:
-
- @pytest.mark.user_data(USER_DATA)
- def test_runcmd(self, client):
- runcmd_output = client.read_from_file("/var/tmp/run_cmd")
- assert runcmd_output.strip() == "cloud-init run cmd test"
diff --git a/tests/integration_tests/modules/test_seed_random_data.py b/tests/integration_tests/modules/test_seed_random_data.py
deleted file mode 100644
index b365fa98..00000000
--- a/tests/integration_tests/modules/test_seed_random_data.py
+++ /dev/null
@@ -1,28 +0,0 @@
-"""Integration test for the random seed module.
-
-This test specifies a command to be executed by the ``seed_random`` module, by
-providing a different data to be used as seed data. We will then check
-if that seed data was actually used.
-
-(This is ported from
-``tests/cloud_tests/testcases/modules/seed_random_data.yaml``.)"""
-
-import pytest
-
-
-USER_DATA = """\
-#cloud-config
-random_seed:
- data: 'MYUb34023nD:LFDK10913jk;dfnk:Df'
- encoding: raw
- file: /root/seed
-"""
-
-
-@pytest.mark.ci
-class TestSeedRandomData:
-
- @pytest.mark.user_data(USER_DATA)
- def test_seed_random_data(self, client):
- seed_output = client.read_from_file("/root/seed")
- assert seed_output.strip() == "MYUb34023nD:LFDK10913jk;dfnk:Df"
diff --git a/tests/integration_tests/modules/test_set_hostname.py b/tests/integration_tests/modules/test_set_hostname.py
index 2bfa403d..ae0aeae9 100644
--- a/tests/integration_tests/modules/test_set_hostname.py
+++ b/tests/integration_tests/modules/test_set_hostname.py
@@ -11,7 +11,6 @@ after the system is boot.
import pytest
-
USER_DATA_HOSTNAME = """\
#cloud-config
hostname: cloudinit2
@@ -24,15 +23,31 @@ hostname: cloudinit1
fqdn: cloudinit2.i9n.cloud-init.io
"""
+USER_DATA_PREFER_FQDN = """\
+#cloud-config
+prefer_fqdn_over_hostname: {}
+hostname: cloudinit1
+fqdn: cloudinit2.test.io
+"""
+
@pytest.mark.ci
class TestHostname:
-
@pytest.mark.user_data(USER_DATA_HOSTNAME)
def test_hostname(self, client):
hostname_output = client.execute("hostname")
assert "cloudinit2" in hostname_output.strip()
+ @pytest.mark.user_data(USER_DATA_PREFER_FQDN.format(True))
+ def test_prefer_fqdn(self, client):
+ hostname_output = client.execute("hostname")
+ assert "cloudinit2.test.io" in hostname_output.strip()
+
+ @pytest.mark.user_data(USER_DATA_PREFER_FQDN.format(False))
+ def test_prefer_short_hostname(self, client):
+ hostname_output = client.execute("hostname")
+ assert "cloudinit1" in hostname_output.strip()
+
@pytest.mark.user_data(USER_DATA_FQDN)
def test_hostname_and_fqdn(self, client):
hostname_output = client.execute("hostname")
@@ -42,6 +57,8 @@ class TestHostname:
assert "cloudinit2.i9n.cloud-init.io" in fqdn_output.strip()
host_output = client.execute("grep ^127 /etc/hosts")
- assert '127.0.1.1 {} {}'.format(
- fqdn_output, hostname_output) in host_output
- assert '127.0.0.1 localhost' in host_output
+ assert (
+ "127.0.1.1 {} {}".format(fqdn_output, hostname_output)
+ in host_output
+ )
+ assert "127.0.0.1 localhost" in host_output
diff --git a/tests/integration_tests/modules/test_set_password.py b/tests/integration_tests/modules/test_set_password.py
index b13f76fb..0e35cd26 100644
--- a/tests/integration_tests/modules/test_set_password.py
+++ b/tests/integration_tests/modules/test_set_password.py
@@ -8,11 +8,10 @@ other tests chpasswd's list being a string. Both expect the same results, so
they use a mixin to share their test definitions, because we can (of course)
only specify one user-data per instance.
"""
-import crypt
-
import pytest
import yaml
+from tests.integration_tests.util import retry
COMMON_USER_DATA = """\
#cloud-config
@@ -40,7 +39,9 @@ Uh69tP4GSrGW5XKHxMLiKowJgm/"
lock_passwd: false
"""
-LIST_USER_DATA = COMMON_USER_DATA + """
+LIST_USER_DATA = (
+ COMMON_USER_DATA
+ + """
chpasswd:
list:
- tom:mypassword123!
@@ -48,8 +49,11 @@ chpasswd:
- harry:RANDOM
- mikey:$5$xZ$B2YGGEx2AOf4PeW48KC6.QyT1W2B4rZ9Qbltudtha89
"""
+)
-STRING_USER_DATA = COMMON_USER_DATA + """
+STRING_USER_DATA = (
+ COMMON_USER_DATA
+ + """
chpasswd:
list: |
tom:mypassword123!
@@ -57,6 +61,7 @@ chpasswd:
harry:RANDOM
mikey:$5$xZ$B2YGGEx2AOf4PeW48KC6.QyT1W2B4rZ9Qbltudtha89
"""
+)
USERS_DICTS = yaml.safe_load(COMMON_USER_DATA)["users"]
USERS_PASSWD_VALUES = {
@@ -116,14 +121,52 @@ class Mixin:
# Which are not the same
assert shadow_users["harry"] != shadow_users["dick"]
+ def test_random_passwords_not_stored_in_cloud_init_output_log(
+ self, class_client
+ ):
+ """We should not emit passwords to the in-instance log file.
+
+ LP: #1918303
+ """
+ cloud_init_output = class_client.read_from_file(
+ "/var/log/cloud-init-output.log"
+ )
+ assert "dick:" not in cloud_init_output
+ assert "harry:" not in cloud_init_output
+
+ @retry(tries=30, delay=1)
+ def test_random_passwords_emitted_to_serial_console(self, class_client):
+ """We should emit passwords to the serial console. (LP: #1918303)"""
+ try:
+ console_log = class_client.instance.console_log()
+ except NotImplementedError:
+ # Assume that an exception here means that we can't use the console
+ # log
+ pytest.skip("NotImplementedError when requesting console log")
+ return
+ if console_log.lower() == "no console output":
+ # This test retries because we might not have the full console log
+ # on the first fetch. However, if we have no console output
+            # at all, we don't want to keep retrying, as that would trigger
+            # another 5-minute wait on the pycloudlib side, which could
+            # leave us waiting for a couple of hours
+ pytest.fail("no console output")
+ return
+ assert "dick:" in console_log
+ assert "harry:" in console_log
+
def test_explicit_password_set_correctly(self, class_client):
"""Test that an explicitly-specified password is set correctly."""
shadow_users, _ = self._fetch_and_parse_etc_shadow(class_client)
fmt_and_salt = shadow_users["tom"].rsplit("$", 1)[0]
- expected_value = crypt.crypt("mypassword123!", fmt_and_salt)
-
- assert expected_value == shadow_users["tom"]
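+        # crypt.crypt() is run on the instance rather than on the test host,
+        # presumably because the hashing methods available to Python's crypt
+        # module vary by platform (e.g. SHA-512 "$6$" salts are not supported
+        # everywhere), and the hash must match what the instance wrote to
+        # /etc/shadow.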
+ GEN_CRYPT_CONTENT = (
+ "import crypt\n"
+ f"print(crypt.crypt('mypassword123!', '{fmt_and_salt}'))\n"
+ )
+ class_client.write_to_file("/gen_crypt.py", GEN_CRYPT_CONTENT)
+ result = class_client.execute("python3 /gen_crypt.py")
+ assert result.stdout == shadow_users["tom"]
def test_shadow_expected_users(self, class_client):
"""Test that the right set of users is in /etc/shadow."""
diff --git a/tests/integration_tests/modules/test_snap.py b/tests/integration_tests/modules/test_snap.py
deleted file mode 100644
index b626f6b0..00000000
--- a/tests/integration_tests/modules/test_snap.py
+++ /dev/null
@@ -1,29 +0,0 @@
-"""Integration test for the snap module.
-
-This test specifies a command to be executed by the ``snap`` module
-and then checks that if that command was executed during boot.
-
-(This is ported from
-``tests/cloud_tests/testcases/modules/runcmd.yaml``.)"""
-
-import pytest
-
-
-USER_DATA = """\
-#cloud-config
-package_update: true
-snap:
- squashfuse_in_container: true
- commands:
- - snap install hello-world
-"""
-
-
-@pytest.mark.ci
-class TestSnap:
-
- @pytest.mark.user_data(USER_DATA)
- def test_snap(self, client):
- snap_output = client.execute("snap list")
- assert "core " in snap_output
- assert "hello-world " in snap_output
diff --git a/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py b/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py
index b9b0d85e..89b49576 100644
--- a/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py
+++ b/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py
@@ -12,13 +12,14 @@ import re
import pytest
+from tests.integration_tests.util import retry
USER_DATA_SSH_AUTHKEY_DISABLE = """\
#cloud-config
no_ssh_fingerprints: true
"""
-USER_DATA_SSH_AUTHKEY_ENABLE="""\
+USER_DATA_SSH_AUTHKEY_ENABLE = """\
#cloud-config
ssh_genkeytypes:
- ecdsa
@@ -30,19 +31,22 @@ ssh_authorized_keys:
@pytest.mark.ci
class TestSshAuthkeyFingerprints:
-
@pytest.mark.user_data(USER_DATA_SSH_AUTHKEY_DISABLE)
def test_ssh_authkey_fingerprints_disable(self, client):
cloudinit_output = client.read_from_file("/var/log/cloud-init.log")
assert (
"Skipping module named ssh-authkey-fingerprints, "
- "logging of SSH fingerprints disabled") in cloudinit_output
+ "logging of SSH fingerprints disabled" in cloudinit_output
+ )
+ # retry decorator here because it can take some time to be reflected
+ # in syslog
+ @retry(tries=30, delay=1)
@pytest.mark.user_data(USER_DATA_SSH_AUTHKEY_ENABLE)
def test_ssh_authkey_fingerprints_enable(self, client):
syslog_output = client.read_from_file("/var/log/syslog")
- assert re.search(r'256 SHA256:.*(ECDSA)', syslog_output) is not None
- assert re.search(r'256 SHA256:.*(ED25519)', syslog_output) is not None
- assert re.search(r'1024 SHA256:.*(DSA)', syslog_output) is None
- assert re.search(r'2048 SHA256:.*(RSA)', syslog_output) is None
+ assert re.search(r"256 SHA256:.*(ECDSA)", syslog_output) is not None
+ assert re.search(r"256 SHA256:.*(ED25519)", syslog_output) is not None
+ assert re.search(r"1024 SHA256:.*(DSA)", syslog_output) is None
+ assert re.search(r"2048 SHA256:.*(RSA)", syslog_output) is None
diff --git a/tests/integration_tests/modules/test_ssh_generate.py b/tests/integration_tests/modules/test_ssh_generate.py
index 60c36982..1dd0adf1 100644
--- a/tests/integration_tests/modules/test_ssh_generate.py
+++ b/tests/integration_tests/modules/test_ssh_generate.py
@@ -10,7 +10,6 @@ keys were created.
import pytest
-
USER_DATA = """\
#cloud-config
ssh_genkeytypes:
@@ -23,28 +22,27 @@ authkey_hash: sha512
@pytest.mark.ci
@pytest.mark.user_data(USER_DATA)
class TestSshKeysGenerate:
-
@pytest.mark.parametrize(
- "ssh_key_path", (
+ "ssh_key_path",
+ (
"/etc/ssh/ssh_host_dsa_key.pub",
"/etc/ssh/ssh_host_dsa_key",
"/etc/ssh/ssh_host_rsa_key.pub",
"/etc/ssh/ssh_host_rsa_key",
- )
+ ),
)
def test_ssh_keys_not_generated(self, ssh_key_path, class_client):
- out = class_client.execute(
- "test -e {}".format(ssh_key_path)
- )
+ out = class_client.execute("test -e {}".format(ssh_key_path))
assert out.failed
@pytest.mark.parametrize(
- "ssh_key_path", (
+ "ssh_key_path",
+ (
"/etc/ssh/ssh_host_ecdsa_key.pub",
"/etc/ssh/ssh_host_ecdsa_key",
"/etc/ssh/ssh_host_ed25519_key.pub",
"/etc/ssh/ssh_host_ed25519_key",
- )
+ ),
)
def test_ssh_keys_generated(self, ssh_key_path, class_client):
out = class_client.read_from_file(ssh_key_path)
diff --git a/tests/integration_tests/modules/test_ssh_import_id.py b/tests/integration_tests/modules/test_ssh_import_id.py
deleted file mode 100644
index 45d37d6c..00000000
--- a/tests/integration_tests/modules/test_ssh_import_id.py
+++ /dev/null
@@ -1,29 +0,0 @@
-"""Integration test for the ssh_import_id module.
-
-This test specifies ssh keys to be imported by the ``ssh_import_id`` module
-and then checks that if the ssh keys were successfully imported.
-
-(This is ported from
-``tests/cloud_tests/testcases/modules/ssh_import_id.yaml``.)"""
-
-import pytest
-
-
-USER_DATA = """\
-#cloud-config
-ssh_import_id:
- - gh:powersj
- - lp:smoser
-"""
-
-
-@pytest.mark.ci
-class TestSshImportId:
-
- @pytest.mark.user_data(USER_DATA)
- def test_ssh_import_id(self, client):
- ssh_output = client.read_from_file(
- "/home/ubuntu/.ssh/authorized_keys")
-
- assert '# ssh-import-id gh:powersj' in ssh_output
- assert '# ssh-import-id lp:smoser' in ssh_output
diff --git a/tests/integration_tests/modules/test_ssh_keys_provided.py b/tests/integration_tests/modules/test_ssh_keys_provided.py
index 27d193c1..b79f18eb 100644
--- a/tests/integration_tests/modules/test_ssh_keys_provided.py
+++ b/tests/integration_tests/modules/test_ssh_keys_provided.py
@@ -9,7 +9,6 @@ system.
import pytest
-
USER_DATA = """\
#cloud-config
disable_root: false
@@ -82,67 +81,60 @@ ssh_keys:
@pytest.mark.ci
@pytest.mark.user_data(USER_DATA)
class TestSshKeysProvided:
-
- def test_ssh_dsa_keys_provided(self, class_client):
- """Test dsa public key was imported."""
- out = class_client.read_from_file("/etc/ssh/ssh_host_dsa_key.pub")
- assert (
- "AAAAB3NzaC1kc3MAAACBAPkWy1zbchVIN7qTgM0/yyY8q4R"
- "ZS8cNM4ZpeuE5UB/Nnr6OSU/nmbO8LuM") in out
-
- """Test dsa private key was imported."""
- out = class_client.read_from_file("/etc/ssh/ssh_host_dsa_key")
- assert (
- "MIIBuwIBAAKBgQD5Fstc23IVSDe6k4DNP8smPKuEWUvHDTOGaXr"
- "hOVAfzZ6+jklP") in out
-
- def test_ssh_rsa_keys_provided(self, class_client):
- """Test rsa public key was imported."""
- out = class_client.read_from_file("/etc/ssh/ssh_host_rsa_key.pub")
- assert (
- "AAAAB3NzaC1yc2EAAAADAQABAAABAQC0/Ho+o3eJISydO2JvIgT"
- "LnZOtrxPl+fSvJfKDjoOLY0HB2eOjy2s2/2N6d9X9SGZ4") in out
-
- """Test rsa private key was imported."""
- out = class_client.read_from_file("/etc/ssh/ssh_host_rsa_key")
- assert (
- "4DOkqNiUGl80Zp1RgZNohHUXlJMtAbrIlAVEk+mTmg7vjfyp2un"
- "RQvLZpMRdywBm") in out
-
- def test_ssh_rsa_certificate_provided(self, class_client):
- """Test rsa certificate was imported."""
- out = class_client.read_from_file("/etc/ssh/ssh_host_rsa_key-cert.pub")
- assert (
- "AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgMpg"
- "BP4Phn3L8I7Vqh7lmHKcOfIokEvSEbHDw83Y3JloAAAAD") in out
-
- def test_ssh_certificate_updated_sshd_config(self, class_client):
- """Test ssh certificate was added to /etc/ssh/sshd_config."""
- out = class_client.read_from_file("/etc/ssh/sshd_config").strip()
- assert "HostCertificate /etc/ssh/ssh_host_rsa_key-cert.pub" in out
-
- def test_ssh_ecdsa_keys_provided(self, class_client):
- """Test ecdsa public key was imported."""
- out = class_client.read_from_file("/etc/ssh/ssh_host_ecdsa_key.pub")
- assert (
- "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAAB"
- "BBFsS5Tvky/IC/dXhE/afxxU") in out
-
- """Test ecdsa private key generated."""
- out = class_client.read_from_file("/etc/ssh/ssh_host_ecdsa_key")
- assert (
- "AwEHoUQDQgAEWxLlO+TL8gL91eET9p/HFQbqR1A691AkJgZk3jY"
- "5mpZqxgX4vcgb") in out
-
- def test_ssh_ed25519_keys_provided(self, class_client):
- """Test ed25519 public key was imported."""
- out = class_client.read_from_file("/etc/ssh/ssh_host_ed25519_key.pub")
- assert (
- "AAAAC3NzaC1lZDI1NTE5AAAAINudAZSu4vjZpVWzId5pXmZg1M6"
- "G15dqjQ2XkNVOEnb5") in out
-
- """Test ed25519 private key was imported."""
- out = class_client.read_from_file("/etc/ssh/ssh_host_ed25519_key")
- assert (
- "XAAAAAtzc2gtZWQyNTUxOQAAACDbnQGUruL42aVVsyHeaV5mYNT"
- "OhteXao0Nl5DVThJ2+Q") in out
+ @pytest.mark.parametrize(
+ "config_path,expected_out",
+ (
+ (
+ "/etc/ssh/ssh_host_dsa_key.pub",
+ "AAAAB3NzaC1kc3MAAACBAPkWy1zbchVIN7qTgM0/yyY8q4R"
+ "ZS8cNM4ZpeuE5UB/Nnr6OSU/nmbO8LuM",
+ ),
+ (
+ "/etc/ssh/ssh_host_dsa_key",
+ "MIIBuwIBAAKBgQD5Fstc23IVSDe6k4DNP8smPKuEWUvHDTOGaXr"
+ "hOVAfzZ6+jklP",
+ ),
+ (
+ "/etc/ssh/ssh_host_rsa_key.pub",
+ "AAAAB3NzaC1yc2EAAAADAQABAAABAQC0/Ho+o3eJISydO2JvIgT"
+ "LnZOtrxPl+fSvJfKDjoOLY0HB2eOjy2s2/2N6d9X9SGZ4",
+ ),
+ (
+ "/etc/ssh/ssh_host_rsa_key",
+ "4DOkqNiUGl80Zp1RgZNohHUXlJMtAbrIlAVEk+mTmg7vjfyp2un"
+ "RQvLZpMRdywBm",
+ ),
+ (
+ "/etc/ssh/ssh_host_rsa_key-cert.pub",
+ "AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgMpg"
+ "BP4Phn3L8I7Vqh7lmHKcOfIokEvSEbHDw83Y3JloAAAAD",
+ ),
+ (
+ "/etc/ssh/sshd_config",
+ "HostCertificate /etc/ssh/ssh_host_rsa_key-cert.pub",
+ ),
+ (
+ "/etc/ssh/ssh_host_ecdsa_key.pub",
+ "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAAB"
+ "BBFsS5Tvky/IC/dXhE/afxxU",
+ ),
+ (
+ "/etc/ssh/ssh_host_ecdsa_key",
+ "AwEHoUQDQgAEWxLlO+TL8gL91eET9p/HFQbqR1A691AkJgZk3jY"
+ "5mpZqxgX4vcgb",
+ ),
+ (
+ "/etc/ssh/ssh_host_ed25519_key.pub",
+ "AAAAC3NzaC1lZDI1NTE5AAAAINudAZSu4vjZpVWzId5pXmZg1M6"
+ "G15dqjQ2XkNVOEnb5",
+ ),
+ (
+ "/etc/ssh/ssh_host_ed25519_key",
+ "XAAAAAtzc2gtZWQyNTUxOQAAACDbnQGUruL42aVVsyHeaV5mYNT"
+ "OhteXao0Nl5DVThJ2+Q",
+ ),
+ ),
+ )
+ def test_ssh_provided_keys(self, config_path, expected_out, class_client):
+ out = class_client.read_from_file(config_path).strip()
+ assert expected_out in out
diff --git a/tests/integration_tests/modules/test_ssh_keysfile.py b/tests/integration_tests/modules/test_ssh_keysfile.py
new file mode 100644
index 00000000..8330a1ce
--- /dev/null
+++ b/tests/integration_tests/modules/test_ssh_keysfile.py
@@ -0,0 +1,224 @@
+from io import StringIO
+
+import paramiko
+import pytest
+from paramiko.ssh_exception import SSHException
+
+from tests.integration_tests.clouds import ImageSpecification
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import get_test_rsa_keypair
+
+TEST_USER1_KEYS = get_test_rsa_keypair("test1")
+TEST_USER2_KEYS = get_test_rsa_keypair("test2")
+TEST_DEFAULT_KEYS = get_test_rsa_keypair("test3")
+
+_USERDATA = """\
+#cloud-config
+bootcmd:
+ - {bootcmd}
+ssh_authorized_keys:
+ - {default}
+users:
+- default
+- name: test_user1
+ ssh_authorized_keys:
+ - {user1}
+- name: test_user2
+ ssh_authorized_keys:
+ - {user2}
+""".format(
+ bootcmd="{bootcmd}",
+ default=TEST_DEFAULT_KEYS.public_key,
+ user1=TEST_USER1_KEYS.public_key,
+ user2=TEST_USER2_KEYS.public_key,
+)
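+# _USERDATA is formatted twice: the call above fills in the public keys while
+# re-inserting a literal "{bootcmd}" placeholder, so each scenario below can
+# supply its own bootcmd, e.g. DEFAULT_KEYS_USERDATA = _USERDATA.format(
+# bootcmd='""').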
+
+
+def common_verify(client, expected_keys):
+ for user, filename, keys in expected_keys:
+ # Ensure key is in the key file
+ contents = client.read_from_file(filename)
+ if user in ["ubuntu", "root"]:
+ lines = contents.split("\n")
+ if user == "root":
+ # Our personal public key gets added by pycloudlib in
+ # addition to the default `ssh_authorized_keys`
+ assert len(lines) == 2
+ else:
+ # Clouds will insert the keys we've added to our accounts
+ # or for our launches
+ assert len(lines) >= 2
+ assert keys.public_key.strip() in contents
+ else:
+ assert contents.strip() == keys.public_key.strip()
+
+ # Ensure we can actually connect
+ ssh = paramiko.SSHClient()
+ ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ paramiko_key = paramiko.RSAKey.from_private_key(
+ StringIO(keys.private_key)
+ )
+
+ # Will fail with AuthenticationException if
+ # we cannot connect
+ ssh.connect(
+ client.instance.ip,
+ username=user,
+ pkey=paramiko_key,
+ look_for_keys=False,
+ allow_agent=False,
+ )
+
+        # Ensure other users can't connect using our key
+ other_users = [u[0] for u in expected_keys if u[2] != keys]
+ for other_user in other_users:
+ with pytest.raises(SSHException):
+ print(
+ "trying to connect as {} with key from {}".format(
+ other_user, user
+ )
+ )
+ ssh.connect(
+ client.instance.ip,
+ username=other_user,
+ pkey=paramiko_key,
+ look_for_keys=False,
+ allow_agent=False,
+ )
+
+ # Ensure we haven't messed with any /home permissions
+ # See LP: #1940233
+ home_dir = "/home/{}".format(user)
+ # Home permissions aren't consistent between releases. On ubuntu
+ # this can change to 750 once focal is unsupported.
+ if ImageSpecification.from_os_image().release in ("bionic", "focal"):
+ home_perms = "755"
+ else:
+ home_perms = "750"
+ if user == "root":
+ home_dir = "/root"
+ home_perms = "700"
+ assert "{} {}".format(user, home_perms) == client.execute(
+ 'stat -c "%U %a" {}'.format(home_dir)
+ )
+ if client.execute("test -d {}/.ssh".format(home_dir)).ok:
+ assert "{} 700".format(user) == client.execute(
+ 'stat -c "%U %a" {}/.ssh'.format(home_dir)
+ )
+ assert "{} 600".format(user) == client.execute(
+ 'stat -c "%U %a" {}'.format(filename)
+ )
+
+ # Also ensure ssh-keygen works as expected
+ client.execute("mkdir {}/.ssh".format(home_dir))
+ assert client.execute(
+ "ssh-keygen -b 2048 -t rsa -f {}/.ssh/id_rsa -q -N ''".format(
+ home_dir
+ )
+ ).ok
+ assert client.execute("test -f {}/.ssh/id_rsa".format(home_dir))
+ assert client.execute("test -f {}/.ssh/id_rsa.pub".format(home_dir))
+
+ assert "root 755" == client.execute('stat -c "%U %a" /home')
+
+
+DEFAULT_KEYS_USERDATA = _USERDATA.format(bootcmd='""')
+
+
+@pytest.mark.ubuntu
+@pytest.mark.user_data(DEFAULT_KEYS_USERDATA)
+def test_authorized_keys_default(client: IntegrationInstance):
+ expected_keys = [
+ (
+ "test_user1",
+ "/home/test_user1/.ssh/authorized_keys",
+ TEST_USER1_KEYS,
+ ),
+ (
+ "test_user2",
+ "/home/test_user2/.ssh/authorized_keys",
+ TEST_USER2_KEYS,
+ ),
+ ("ubuntu", "/home/ubuntu/.ssh/authorized_keys", TEST_DEFAULT_KEYS),
+ ("root", "/root/.ssh/authorized_keys", TEST_DEFAULT_KEYS),
+ ]
+ common_verify(client, expected_keys)
+
+
+AUTHORIZED_KEYS2_USERDATA = _USERDATA.format(
+ bootcmd=(
+ "sed -i 's;#AuthorizedKeysFile.*;AuthorizedKeysFile "
+ "/etc/ssh/authorized_keys %h/.ssh/authorized_keys2;' "
+ "/etc/ssh/sshd_config"
+ )
+)
+
+
+@pytest.mark.ubuntu
+@pytest.mark.user_data(AUTHORIZED_KEYS2_USERDATA)
+def test_authorized_keys2(client: IntegrationInstance):
+ expected_keys = [
+ (
+ "test_user1",
+ "/home/test_user1/.ssh/authorized_keys2",
+ TEST_USER1_KEYS,
+ ),
+ (
+ "test_user2",
+ "/home/test_user2/.ssh/authorized_keys2",
+ TEST_USER2_KEYS,
+ ),
+ ("ubuntu", "/home/ubuntu/.ssh/authorized_keys2", TEST_DEFAULT_KEYS),
+ ("root", "/root/.ssh/authorized_keys2", TEST_DEFAULT_KEYS),
+ ]
+ common_verify(client, expected_keys)
+
+
+NESTED_KEYS_USERDATA = _USERDATA.format(
+ bootcmd=(
+ "sed -i 's;#AuthorizedKeysFile.*;AuthorizedKeysFile "
+ "/etc/ssh/authorized_keys %h/foo/bar/ssh/keys;' "
+ "/etc/ssh/sshd_config"
+ )
+)
+
+
+@pytest.mark.ubuntu
+@pytest.mark.user_data(NESTED_KEYS_USERDATA)
+def test_nested_keys(client: IntegrationInstance):
+ expected_keys = [
+ ("test_user1", "/home/test_user1/foo/bar/ssh/keys", TEST_USER1_KEYS),
+ ("test_user2", "/home/test_user2/foo/bar/ssh/keys", TEST_USER2_KEYS),
+ ("ubuntu", "/home/ubuntu/foo/bar/ssh/keys", TEST_DEFAULT_KEYS),
+ ("root", "/root/foo/bar/ssh/keys", TEST_DEFAULT_KEYS),
+ ]
+ common_verify(client, expected_keys)
+
+
+EXTERNAL_KEYS_USERDATA = _USERDATA.format(
+ bootcmd=(
+ "sed -i 's;#AuthorizedKeysFile.*;AuthorizedKeysFile "
+ "/etc/ssh/authorized_keys /etc/ssh/authorized_keys/%u/keys;' "
+ "/etc/ssh/sshd_config"
+ )
+)
+
+
+@pytest.mark.ubuntu
+@pytest.mark.user_data(EXTERNAL_KEYS_USERDATA)
+def test_external_keys(client: IntegrationInstance):
+ expected_keys = [
+ (
+ "test_user1",
+ "/etc/ssh/authorized_keys/test_user1/keys",
+ TEST_USER1_KEYS,
+ ),
+ (
+ "test_user2",
+ "/etc/ssh/authorized_keys/test_user2/keys",
+ TEST_USER2_KEYS,
+ ),
+ ("ubuntu", "/etc/ssh/authorized_keys/ubuntu/keys", TEST_DEFAULT_KEYS),
+ ("root", "/etc/ssh/authorized_keys/root/keys", TEST_DEFAULT_KEYS),
+ ]
+ common_verify(client, expected_keys)
diff --git a/tests/integration_tests/modules/test_timezone.py b/tests/integration_tests/modules/test_timezone.py
deleted file mode 100644
index 111d53f7..00000000
--- a/tests/integration_tests/modules/test_timezone.py
+++ /dev/null
@@ -1,25 +0,0 @@
-"""Integration test for the timezone module.
-
-This test specifies a timezone to be used by the ``timezone`` module
-and then checks that if that timezone was respected during boot.
-
-(This is ported from
-``tests/cloud_tests/testcases/modules/timezone.yaml``.)"""
-
-import pytest
-
-
-USER_DATA = """\
-#cloud-config
-timezone: US/Aleutian
-"""
-
-
-@pytest.mark.ci
-class TestTimezone:
-
- @pytest.mark.user_data(USER_DATA)
- def test_timezone(self, client):
- timezone_output = client.execute(
- 'date "+%Z" --date="Thu, 03 Nov 2016 00:47:00 -0400"')
- assert timezone_output.strip() == "HDT"
diff --git a/tests/integration_tests/modules/test_user_events.py b/tests/integration_tests/modules/test_user_events.py
new file mode 100644
index 00000000..e4a4241f
--- /dev/null
+++ b/tests/integration_tests/modules/test_user_events.py
@@ -0,0 +1,110 @@
+"""Test user-overridable events.
+
+This is currently limited to applying network config on BOOT events.
+"""
+
+import re
+
+import pytest
+import yaml
+
+from tests.integration_tests.instances import IntegrationInstance
+
+
+def _add_dummy_bridge_to_netplan(client: IntegrationInstance):
+ # Update netplan configuration to ensure it doesn't change on reboot
+ netplan = yaml.safe_load(
+ client.execute("cat /etc/netplan/50-cloud-init.yaml")
+ )
+ # Just a dummy bridge to do nothing
+ try:
+ netplan["network"]["bridges"]["dummy0"] = {"dhcp4": False}
+ except KeyError:
+ netplan["network"]["bridges"] = {"dummy0": {"dhcp4": False}}
+
+ dumped_netplan = yaml.dump(netplan)
+ client.write_to_file("/etc/netplan/50-cloud-init.yaml", dumped_netplan)
+
+
+@pytest.mark.lxd_container
+@pytest.mark.lxd_vm
+@pytest.mark.ec2
+@pytest.mark.gce
+@pytest.mark.oci
+@pytest.mark.openstack
+def test_boot_event_disabled_by_default(client: IntegrationInstance):
+ log = client.read_from_file("/var/log/cloud-init.log")
+ if "network config is disabled" in log:
+ pytest.skip("network config disabled. Test doesn't apply")
+ assert "Applying network configuration" in log
+ assert "dummy0" not in client.execute("ls /sys/class/net")
+
+ _add_dummy_bridge_to_netplan(client)
+ client.execute("rm /var/log/cloud-init.log")
+
+ client.restart()
+ log2 = client.read_from_file("/var/log/cloud-init.log")
+
+ if "cache invalid in datasource" in log2:
+ # Invalid cache will get cleared, meaning we'll create a new
+ # "instance" and apply networking config, so events aren't
+ # really relevant here
+ pytest.skip("Test only valid for existing instances")
+
+ # We attempt to apply network config twice on every boot.
+ # Ensure neither time works.
+ assert 2 == len(
+ re.findall(
+ r"Event Denied: scopes=\['network'\] EventType=boot[^-]", log2
+ )
+ )
+ assert 2 == log2.count(
+ "Event Denied: scopes=['network'] EventType=boot-legacy"
+ )
+ assert 2 == log2.count(
+ "No network config applied. Neither a new instance"
+ " nor datasource network update allowed"
+ )
+
+ assert "dummy0" in client.execute("ls /sys/class/net")
+
+
+def _test_network_config_applied_on_reboot(client: IntegrationInstance):
+ log = client.read_from_file("/var/log/cloud-init.log")
+ if "network config is disabled" in log:
+ pytest.skip("network config disabled. Test doesn't apply")
+ assert "Applying network configuration" in log
+ assert "dummy0" not in client.execute("ls /sys/class/net")
+
+ _add_dummy_bridge_to_netplan(client)
+ client.execute('echo "" > /var/log/cloud-init.log')
+ client.restart()
+
+ log = client.read_from_file("/var/log/cloud-init.log")
+ if "cache invalid in datasource" in log:
+ # Invalid cache will get cleared, meaning we'll create a new
+ # "instance" and apply networking config, so events aren't
+ # really relevant here
+ pytest.skip("Test only valid for existing instances")
+
+ assert "Event Allowed: scope=network EventType=boot" in log
+ assert "Applying network configuration" in log
+ assert "dummy0" not in client.execute("ls /sys/class/net")
+
+
+@pytest.mark.azure
+def test_boot_event_enabled_by_default(client: IntegrationInstance):
+ _test_network_config_applied_on_reboot(client)
+
+
+USER_DATA = """\
+#cloud-config
+updates:
+ network:
+ when: [boot]
+"""
+
+
+@pytest.mark.user_data(USER_DATA)
+def test_boot_event_enabled(client: IntegrationInstance):
+ _test_network_config_applied_on_reboot(client)
diff --git a/tests/integration_tests/modules/test_users_groups.py b/tests/integration_tests/modules/test_users_groups.py
index 6a51f5a6..fddff681 100644
--- a/tests/integration_tests/modules/test_users_groups.py
+++ b/tests/integration_tests/modules/test_users_groups.py
@@ -1,12 +1,15 @@
-"""Integration test for the user_groups module.
+"""Integration tests for the user_groups module.
-This test specifies a number of users and groups via user-data, and confirms
-that they have been configured correctly in the system under test.
+TODO:
+* This module assumes that the "ubuntu" user will be created when "default" is
+ specified; this will need modification to run on other OSes.
"""
import re
import pytest
+from tests.integration_tests.clouds import ImageSpecification
+from tests.integration_tests.instances import IntegrationInstance
USER_DATA = """\
#cloud-config
@@ -41,6 +44,13 @@ AHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/
@pytest.mark.ci
@pytest.mark.user_data(USER_DATA)
class TestUsersGroups:
+ """Test users and groups.
+
+ This test specifies a number of users and groups via user-data, and
+ confirms that they have been configured correctly in the system under test.
+ """
+
+ @pytest.mark.ubuntu
@pytest.mark.parametrize(
"getent_args,regex",
[
@@ -73,7 +83,9 @@ class TestUsersGroups:
assert re.search(regex, result.stdout) is not None, (
"'getent {}' resulted in '{}', "
"but expected to match regex {}".format(
- ' '.join(getent_args), result.stdout, regex))
+ " ".join(getent_args), result.stdout, regex
+ )
+ )
def test_user_root_in_secret(self, class_client):
"""Test root user is in 'secret' group."""
@@ -81,3 +93,33 @@ class TestUsersGroups:
_, groups_str = output.split(":", maxsplit=1)
groups = groups_str.split()
assert "secret" in groups
+
+
+@pytest.mark.user_data(USER_DATA)
+def test_sudoers_includedir(client: IntegrationInstance):
+ """Ensure we don't add additional #includedir to sudoers.
+
+ Newer versions of /etc/sudoers will use @includedir rather than
+ #includedir. Ensure we handle that properly and don't include an
+ additional #includedir when one isn't warranted.
+
+ https://github.com/canonical/cloud-init/pull/783
+ """
+ if ImageSpecification.from_os_image().release in [
+ "bionic",
+ "focal",
+ ]:
+ raise pytest.skip(
+ "Test requires version of sudo installed on groovy and later"
+ )
+ client.execute("sed -i 's/#include/@include/g' /etc/sudoers")
+
+ sudoers = client.read_from_file("/etc/sudoers")
+ if "@includedir /etc/sudoers.d" not in sudoers:
+ client.execute("echo '@includedir /etc/sudoers.d' >> /etc/sudoers")
+ client.instance.clean()
+ client.restart()
+ sudoers = client.read_from_file("/etc/sudoers")
+
+ assert "#includedir" not in sudoers
+ assert sudoers.count("includedir /etc/sudoers.d") == 1
diff --git a/tests/integration_tests/modules/test_version_change.py b/tests/integration_tests/modules/test_version_change.py
new file mode 100644
index 00000000..3168cd60
--- /dev/null
+++ b/tests/integration_tests/modules/test_version_change.py
@@ -0,0 +1,76 @@
+from pathlib import Path
+
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import ASSETS_DIR, verify_clean_log
+
+PICKLE_PATH = Path("/var/lib/cloud/instance/obj.pkl")
+TEST_PICKLE = ASSETS_DIR / "test_version_change.pkl"
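+# TEST_PICKLE is assumed to be an obj.pkl from an incompatible cloud-init
+# version; pushing it over the instance's cached pickle lets these tests
+# exercise the "Failed loading pickled blob" and version-change code paths.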
+
+
+def _assert_no_pickle_problems(log):
+ assert "Failed loading pickled blob" not in log
+ verify_clean_log(log)
+
+
+def test_reboot_without_version_change(client: IntegrationInstance):
+ log = client.read_from_file("/var/log/cloud-init.log")
+ assert "Python version change detected" not in log
+ assert "Cache compatibility status is currently unknown." not in log
+ _assert_no_pickle_problems(log)
+
+ client.restart()
+ log = client.read_from_file("/var/log/cloud-init.log")
+ assert "Python version change detected" not in log
+ assert "Could not determine Python version used to write cache" not in log
+ _assert_no_pickle_problems(log)
+
+ # Now ensure that loading a bad pickle gives us problems
+ client.push_file(TEST_PICKLE, PICKLE_PATH)
+ client.restart()
+ log = client.read_from_file("/var/log/cloud-init.log")
+
+ # no cache found is an "expected" upgrade error, and
+ # "Failed" means we're unable to load the pickle
+ assert any(
+ [
+ "Failed loading pickled blob from {}".format(PICKLE_PATH) in log,
+ "no cache found" in log,
+ ]
+ )
+
+
+@pytest.mark.ec2
+@pytest.mark.gce
+@pytest.mark.oci
+@pytest.mark.openstack
+@pytest.mark.lxd_container
+@pytest.mark.lxd_vm
+# No Azure because the cache gets purged every reboot, so we'll never
+# get to the point where we need to purge cache due to version change
+def test_cache_purged_on_version_change(client: IntegrationInstance):
+ # Start by pushing the invalid pickle so we'll hit an error if the
+ # cache didn't actually get purged
+ client.push_file(TEST_PICKLE, PICKLE_PATH)
+ client.execute("echo '1.0' > /var/lib/cloud/data/python-version")
+ client.restart()
+ log = client.read_from_file("/var/log/cloud-init.log")
+ assert "Python version change detected. Purging cache" in log
+ _assert_no_pickle_problems(log)
+
+
+def test_log_message_on_missing_version_file(client: IntegrationInstance):
+ # Start by pushing a pickle so we can see the log message
+ client.push_file(TEST_PICKLE, PICKLE_PATH)
+ client.execute("rm /var/lib/cloud/data/python-version")
+ client.execute("rm /var/log/cloud-init.log")
+ client.restart()
+ log = client.read_from_file("/var/log/cloud-init.log")
+ if "no cache found" not in log:
+ # We don't expect the python version file to exist if we have no
+ # pre-existing cache
+ assert (
+ "Writing python-version file. "
+ "Cache compatibility status is currently unknown." in log
+ )
diff --git a/tests/integration_tests/modules/test_write_files.py b/tests/integration_tests/modules/test_write_files.py
index 15832ae3..1eb7e945 100644
--- a/tests/integration_tests/modules/test_write_files.py
+++ b/tests/integration_tests/modules/test_write_files.py
@@ -7,8 +7,8 @@ and then checks if those files were created during boot.
``tests/cloud_tests/testcases/modules/write_files.yaml``.)"""
import base64
-import pytest
+import pytest
ASCII_TEXT = "ASCII text"
B64_CONTENT = base64.b64encode(ASCII_TEXT.encode("utf-8"))
@@ -21,6 +21,9 @@ B64_CONTENT = base64.b64encode(ASCII_TEXT.encode("utf-8"))
#
USER_DATA = """\
#cloud-config
+users:
+- default
+- name: myuser
write_files:
- encoding: b64
content: {}
@@ -41,26 +44,50 @@ write_files:
H4sIAIDb/U8C/1NW1E/KzNMvzuBKTc7IV8hIzcnJVyjPL8pJ4QIA6N+MVxsAAAA=
path: /root/file_gzip
permissions: '0755'
-""".format(B64_CONTENT.decode("ascii"))
+- path: '/home/testuser/my-file'
+ content: |
+ echo 'hello world!'
+ defer: true
+ owner: 'myuser'
+ permissions: '0644'
+""".format(
+ B64_CONTENT.decode("ascii")
+)
@pytest.mark.ci
@pytest.mark.user_data(USER_DATA)
class TestWriteFiles:
-
@pytest.mark.parametrize(
- "cmd,expected_out", (
+ "cmd,expected_out",
+ (
("file /root/file_b64", ASCII_TEXT),
("md5sum </root/file_binary", "3801184b97bb8c6e63fa0e1eae2920d7"),
- ("sha256sum </root/file_binary", (
+ (
+ "sha256sum </root/file_binary",
"2c791c4037ea5bd7e928d6a87380f8ba"
- "7a803cd83d5e4f269e28f5090f0f2c9a"
- )),
- ("file /root/file_gzip",
- "POSIX shell script, ASCII text executable"),
+ "7a803cd83d5e4f269e28f5090f0f2c9a",
+ ),
+ (
+ "file /root/file_gzip",
+ "POSIX shell script, ASCII text executable",
+ ),
("file /root/file_text", ASCII_TEXT),
- )
+ ),
)
def test_write_files(self, cmd, expected_out, class_client):
out = class_client.execute(cmd)
assert expected_out in out
+
+ def test_write_files_deferred(self, class_client):
+ """Test that write files deferred works as expected.
+
+ Users get created after write_files module runs, so ensure that
+ with `defer: true`, the file gets written with correct ownership.
+ """
+ out = class_client.read_from_file("/home/testuser/my-file")
+ assert "echo 'hello world!'" == out
+ assert (
+ class_client.execute('stat -c "%U %a" /home/testuser/my-file')
+ == "myuser 644"
+ )
diff --git a/tests/integration_tests/network/test_net_config_load.py b/tests/integration_tests/network/test_net_config_load.py
new file mode 100644
index 00000000..a6863b63
--- /dev/null
+++ b/tests/integration_tests/network/test_net_config_load.py
@@ -0,0 +1,27 @@
+"""Test loading the network config"""
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+
+
+def _customize_environment(client: IntegrationInstance):
+ # Insert our "disable_network_config" file here
+ client.write_to_file(
+ "/etc/cloud/cloud.cfg.d/99-disable-network-config.cfg",
+ "network: {config: disabled}\n",
+ )
+ client.execute("cloud-init clean --logs")
+ client.restart()
+
+
+def test_network_disabled_via_etc_cloud(client: IntegrationInstance):
+ """Test that network can be disabled via config file in /etc/cloud"""
+ if client.settings.CLOUD_INIT_SOURCE == "IN_PLACE":
+ pytest.skip(
+ "IN_PLACE not supported as we mount /etc/cloud contents into the "
+ "container"
+ )
+    _customize_environment(client)
+
+ log = client.read_from_file("/var/log/cloud-init.log")
+ assert "network config is disabled by system_cfg" in log
diff --git a/tests/integration_tests/test_logging.py b/tests/integration_tests/test_logging.py
new file mode 100644
index 00000000..b31a0434
--- /dev/null
+++ b/tests/integration_tests/test_logging.py
@@ -0,0 +1,22 @@
+"""Integration tests relating to cloud-init's logging."""
+
+
+class TestVarLogCloudInitOutput:
+ """Integration tests relating to /var/log/cloud-init-output.log."""
+
+ def test_var_log_cloud_init_output_not_world_readable(self, client):
+ """
+ The log can contain sensitive data, it shouldn't be world-readable.
+
+ LP: #1918303
+ """
+ # Check the file exists
+ assert client.execute("test -f /var/log/cloud-init-output.log").ok
+
+ # Check its permissions are as we expect
+ perms, user, group = client.execute(
+ "stat -c %a:%U:%G /var/log/cloud-init-output.log"
+ ).split(":")
+ assert "640" == perms
+ assert "root" == user
+ assert "adm" == group
diff --git a/tests/integration_tests/test_shell_script_by_frequency.py b/tests/integration_tests/test_shell_script_by_frequency.py
new file mode 100644
index 00000000..25157722
--- /dev/null
+++ b/tests/integration_tests/test_shell_script_by_frequency.py
@@ -0,0 +1,48 @@
+"""Integration tests for various handlers."""
+
+from io import StringIO
+
+import pytest
+
+from cloudinit.cmd.devel.make_mime import create_mime_message
+from tests.integration_tests.instances import IntegrationInstance
+
+PER_FREQ_TEMPLATE = """\
+#!/bin/bash
+touch /tmp/test_per_freq_{}
+"""
+
+PER_ALWAYS_FILE = StringIO(PER_FREQ_TEMPLATE.format("always"))
+PER_INSTANCE_FILE = StringIO(PER_FREQ_TEMPLATE.format("instance"))
+PER_ONCE_FILE = StringIO(PER_FREQ_TEMPLATE.format("once"))
+
+FILES = [
+ (PER_ALWAYS_FILE, "always.sh", "x-shellscript-per-boot"),
+ (PER_INSTANCE_FILE, "instance.sh", "x-shellscript-per-instance"),
+ (PER_ONCE_FILE, "once.sh", "x-shellscript-per-once"),
+]
+
+USER_DATA, errors = create_mime_message(FILES)
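+# create_mime_message (cloudinit.cmd.devel.make_mime) returns a
+# (mime_user_data, errors) tuple built from the (file-like, filename,
+# content-subtype) entries above; the x-shellscript-per-* parts are expected
+# to land in /var/lib/cloud/scripts/{per-boot,per-instance,per-once}.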
+
+
+@pytest.mark.ci
+@pytest.mark.user_data(USER_DATA)
+def test_per_freq(client: IntegrationInstance):
+ # Sanity test for scripts folder
+ cmd = "test -d /var/lib/cloud/scripts"
+ assert client.execute(cmd).ok
+ # Test per-boot
+ cmd = "test -f /var/lib/cloud/scripts/per-boot/always.sh"
+ assert client.execute(cmd).ok
+ cmd = "test -f /tmp/test_per_freq_always"
+ assert client.execute(cmd).ok
+ # Test per-instance
+ cmd = "test -f /var/lib/cloud/scripts/per-instance/instance.sh"
+ assert client.execute(cmd).ok
+ cmd = "test -f /tmp/test_per_freq_instance"
+ assert client.execute(cmd).ok
+ # Test per-once
+ cmd = "test -f /var/lib/cloud/scripts/per-once/once.sh"
+ assert client.execute(cmd).ok
+ cmd = "test -f /tmp/test_per_freq_once"
+ assert client.execute(cmd).ok
diff --git a/tests/integration_tests/test_upgrade.py b/tests/integration_tests/test_upgrade.py
new file mode 100644
index 00000000..b13d4703
--- /dev/null
+++ b/tests/integration_tests/test_upgrade.py
@@ -0,0 +1,188 @@
+import json
+import logging
+import os
+
+import pytest
+
+from tests.integration_tests.clouds import ImageSpecification, IntegrationCloud
+from tests.integration_tests.conftest import get_validated_source
+from tests.integration_tests.util import verify_clean_log
+
+LOG = logging.getLogger("integration_testing.test_upgrade")
+
+LOG_TEMPLATE = """\n\
+=== `systemd-analyze` before:
+{pre_systemd_analyze}
+=== `systemd-analyze` after:
+{post_systemd_analyze}
+
+=== `systemd-analyze blame` before (first 10 lines):
+{pre_systemd_blame}
+=== `systemd-analyze blame` after (first 10 lines):
+{post_systemd_blame}
+
+=== `cloud-init analyze show` before:
+{pre_analyze_totals}
+=== `cloud-init analyze show` after:
+{post_analyze_totals}
+
+=== `cloud-init analyze blame` before (first 10 lines):
+{pre_cloud_blame}
+=== `cloud-init analyze blame` after (first 10 lines):
+{post_cloud_blame}
+"""
+
+UNSUPPORTED_INSTALL_METHOD_MSG = (
+ "Install method '{}' not supported for this test"
+)
+USER_DATA = """\
+#cloud-config
+hostname: SRU-worked
+"""
+
+
+def test_clean_boot_of_upgraded_package(session_cloud: IntegrationCloud):
+ source = get_validated_source(session_cloud)
+ if not source.installs_new_version():
+ pytest.skip(UNSUPPORTED_INSTALL_METHOD_MSG.format(source))
+ return # type checking doesn't understand that skip raises
+ if (
+ ImageSpecification.from_os_image().release == "bionic"
+ and session_cloud.settings.PLATFORM == "lxd_vm"
+ ):
+ # The issues that we see on Bionic VMs don't appear anywhere
+ # else, including when calling KVM directly. It likely has to
+ # do with the extra lxd-agent setup happening on bionic.
+ # Given that we still have Bionic covered on all other platforms,
+ # the risk of skipping bionic here seems low enough.
+ pytest.skip("Upgrade test doesn't run on LXD VMs and bionic")
+ return
+
+ launch_kwargs = {
+ "image_id": session_cloud.initial_image_id,
+ }
+
+ with session_cloud.launch(
+ launch_kwargs=launch_kwargs,
+ user_data=USER_DATA,
+ ) as instance:
+ # get pre values
+ pre_hostname = instance.execute("hostname")
+ pre_cloud_id = instance.execute("cloud-id")
+ pre_result = instance.execute("cat /run/cloud-init/result.json")
+ pre_network = instance.execute("cat /etc/netplan/50-cloud-init.yaml")
+ pre_systemd_analyze = instance.execute("systemd-analyze")
+ pre_systemd_blame = instance.execute("systemd-analyze blame")
+ pre_cloud_analyze = instance.execute("cloud-init analyze show")
+ pre_cloud_blame = instance.execute("cloud-init analyze blame")
+
+ # Ensure no issues pre-upgrade
+ log = instance.read_from_file("/var/log/cloud-init.log")
+ assert not json.loads(pre_result)["v1"]["errors"]
+
+ try:
+ verify_clean_log(log)
+ except AssertionError:
+ LOG.warning(
+ "There were errors/warnings/tracebacks pre-upgrade. "
+ "Any failures may be due to pre-upgrade problem"
+ )
+
+ # Upgrade
+ instance.install_new_cloud_init(source, take_snapshot=False)
+
+ # 'cloud-init init' helps us understand if our pickling upgrade paths
+ # have broken across re-constitution of a cached datasource. Some
+ # platforms invalidate their datasource cache on reboot, so we run
+ # it here to ensure we get a dirty run.
+ assert instance.execute("cloud-init init").ok
+
+ # Reboot
+ instance.execute("hostname something-else")
+ instance.restart()
+ assert instance.execute("cloud-init status --wait --long").ok
+
+ # get post values
+ post_hostname = instance.execute("hostname")
+ post_cloud_id = instance.execute("cloud-id")
+ post_result = instance.execute("cat /run/cloud-init/result.json")
+ post_network = instance.execute("cat /etc/netplan/50-cloud-init.yaml")
+ post_systemd_analyze = instance.execute("systemd-analyze")
+ post_systemd_blame = instance.execute("systemd-analyze blame")
+ post_cloud_analyze = instance.execute("cloud-init analyze show")
+ post_cloud_blame = instance.execute("cloud-init analyze blame")
+
+ # Ensure no issues post-upgrade
+        assert not json.loads(post_result)["v1"]["errors"]
+
+ log = instance.read_from_file("/var/log/cloud-init.log")
+ verify_clean_log(log)
+
+ # Ensure important things stayed the same
+ assert pre_hostname == post_hostname
+ assert pre_cloud_id == post_cloud_id
+ try:
+ assert pre_result == post_result
+ except AssertionError:
+ if instance.settings.PLATFORM == "azure":
+ pre_json = json.loads(pre_result)
+ post_json = json.loads(post_result)
+ assert pre_json["v1"]["datasource"].startswith(
+ "DataSourceAzure"
+ )
+ assert post_json["v1"]["datasource"].startswith(
+ "DataSourceAzure"
+ )
+ assert pre_network == post_network
+
+ # Calculate and log all the boot numbers
+ pre_analyze_totals = [
+ x
+ for x in pre_cloud_analyze.splitlines()
+ if x.startswith("Finished stage") or x.startswith("Total Time")
+ ]
+ post_analyze_totals = [
+ x
+ for x in post_cloud_analyze.splitlines()
+ if x.startswith("Finished stage") or x.startswith("Total Time")
+ ]
+
+ # pylint: disable=logging-format-interpolation
+ LOG.info(
+ LOG_TEMPLATE.format(
+ pre_systemd_analyze=pre_systemd_analyze,
+ post_systemd_analyze=post_systemd_analyze,
+ pre_systemd_blame="\n".join(
+ pre_systemd_blame.splitlines()[:10]
+ ),
+ post_systemd_blame="\n".join(
+ post_systemd_blame.splitlines()[:10]
+ ),
+ pre_analyze_totals="\n".join(pre_analyze_totals),
+ post_analyze_totals="\n".join(post_analyze_totals),
+ pre_cloud_blame="\n".join(pre_cloud_blame.splitlines()[:10]),
+ post_cloud_blame="\n".join(post_cloud_blame.splitlines()[:10]),
+ )
+ )
+
+
+@pytest.mark.ci
+@pytest.mark.ubuntu
+def test_subsequent_boot_of_upgraded_package(session_cloud: IntegrationCloud):
+ source = get_validated_source(session_cloud)
+ if not source.installs_new_version():
+ if os.environ.get("TRAVIS"):
+ # If this isn't running on CI, we should know
+ pytest.fail(UNSUPPORTED_INSTALL_METHOD_MSG.format(source))
+ else:
+ pytest.skip(UNSUPPORTED_INSTALL_METHOD_MSG.format(source))
+ return # type checking doesn't understand that skip raises
+
+ launch_kwargs = {"image_id": session_cloud.initial_image_id}
+
+ with session_cloud.launch(launch_kwargs=launch_kwargs) as instance:
+ instance.install_new_cloud_init(
+ source, take_snapshot=False, clean=False
+ )
+ instance.restart()
+ assert instance.execute("cloud-init status --wait --long").ok
diff --git a/tests/integration_tests/util.py b/tests/integration_tests/util.py
new file mode 100644
index 00000000..31fe69c0
--- /dev/null
+++ b/tests/integration_tests/util.py
@@ -0,0 +1,142 @@
+import functools
+import logging
+import multiprocessing
+import os
+import time
+from collections import namedtuple
+from contextlib import contextmanager
+from pathlib import Path
+
+log = logging.getLogger("integration_testing")
+key_pair = namedtuple("key_pair", "public_key private_key")
+
+ASSETS_DIR = Path("tests/integration_tests/assets")
+KEY_PATH = ASSETS_DIR / "keys"
+
+
+def verify_ordered_items_in_text(to_verify: list, text: str):
+ """Assert all items in list appear in order in text.
+
+ Examples:
+ verify_ordered_items_in_text(['a', '1'], 'ab1') # passes
+ verify_ordered_items_in_text(['1', 'a'], 'ab1') # raises AssertionError
+ """
+ index = 0
+ for item in to_verify:
+ index = text[index:].find(item)
+ assert index > -1, "Expected item not found: '{}'".format(item)
+
+
+def verify_clean_log(log):
+ """Assert no unexpected tracebacks or warnings in logs"""
+ warning_count = log.count("WARN")
+ expected_warnings = 0
+ traceback_count = log.count("Traceback")
+ expected_tracebacks = 0
+
+ warning_texts = [
+ # Consistently on all Azure launches:
+ # azure.py[WARNING]: No lease found; using default endpoint
+ "No lease found; using default endpoint"
+ ]
+ traceback_texts = []
+ if "oracle" in log:
+ # LP: #1842752
+ lease_exists_text = "Stderr: RTNETLINK answers: File exists"
+ warning_texts.append(lease_exists_text)
+ traceback_texts.append(lease_exists_text)
+ # LP: #1833446
+ fetch_error_text = (
+ "UrlError: 404 Client Error: Not Found for url: "
+ "http://169.254.169.254/latest/meta-data/"
+ )
+ warning_texts.append(fetch_error_text)
+ traceback_texts.append(fetch_error_text)
+ # Oracle has a file in /etc/cloud/cloud.cfg.d that contains
+ # users:
+ # - default
+ # - name: opc
+ # ssh_redirect_user: true
+ # This can trigger a warning about opc having no public key
+ warning_texts.append(
+ "Unable to disable SSH logins for opc given ssh_redirect_user"
+ )
+
+ for warning_text in warning_texts:
+ expected_warnings += log.count(warning_text)
+ for traceback_text in traceback_texts:
+ expected_tracebacks += log.count(traceback_text)
+
+ assert warning_count == expected_warnings
+ assert traceback_count == expected_tracebacks
+
+
+@contextmanager
+def emit_dots_on_travis():
+ """emit a dot every 60 seconds if running on Travis.
+
+ Travis will kill jobs that don't emit output for a certain amount of time.
+ This context manager spins up a background process which will emit a dot to
+ stdout every 60 seconds to avoid being killed.
+
+ It should be wrapped selectively around operations that are known to take a
+ long time.
+ """
+ if os.environ.get("TRAVIS") != "true":
+ # If we aren't on Travis, don't do anything.
+ yield
+ return
+
+ def emit_dots():
+ while True:
+ log.info(".")
+ time.sleep(60)
+
+ dot_process = multiprocessing.Process(target=emit_dots)
+ dot_process.start()
+ try:
+ yield
+ finally:
+ dot_process.terminate()
+
+
+def get_test_rsa_keypair(key_name: str = "test1") -> key_pair:
+ private_key_path = KEY_PATH / "id_rsa.{}".format(key_name)
+ public_key_path = KEY_PATH / "id_rsa.{}.pub".format(key_name)
+ with public_key_path.open() as public_file:
+ public_key = public_file.read()
+ with private_key_path.open() as private_file:
+ private_key = private_file.read()
+ return key_pair(public_key, private_key)
+
+
+def retry(*, tries: int = 30, delay: int = 1):
+ """Decorator for retries.
+
+ Retry a function until code no longer raises an exception or
+ max tries is reached.
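+
+    Note: the wrapper below discards the wrapped function's return value,
+    so this decorator is intended for assertion-style helpers.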
+
+ Example:
+ @retry(tries=5, delay=1)
+ def try_something_that_may_not_be_ready():
+ ...
+ """
+
+ def _retry(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ last_error = None
+ for _ in range(tries):
+ try:
+ func(*args, **kwargs)
+ break
+ except Exception as e:
+ last_error = e
+ time.sleep(delay)
+ else:
+ if last_error:
+ raise last_error
+
+ return wrapper
+
+ return _retry
diff --git a/tests/unittests/__init__.py b/tests/unittests/__init__.py
index d89ed443..657cb399 100644
--- a/tests/unittests/__init__.py
+++ b/tests/unittests/__init__.py
@@ -4,6 +4,7 @@ try:
# For test cases, avoid the following UserWarning to stderr:
# You don't have the C version of NameMapper installed ...
from Cheetah import NameMapper as _nm
+
_nm.C_VERSION = True
except ImportError:
pass
diff --git a/tests/unittests/analyze/test_boot.py b/tests/unittests/analyze/test_boot.py
new file mode 100644
index 00000000..68db69ec
--- /dev/null
+++ b/tests/unittests/analyze/test_boot.py
@@ -0,0 +1,174 @@
+import os
+
+from cloudinit.analyze.__main__ import analyze_boot, get_parser
+from cloudinit.analyze.show import (
+ CONTAINER_CODE,
+ FAIL_CODE,
+ SystemctlReader,
+ dist_check_timestamp,
+)
+from tests.unittests.helpers import CiTestCase, mock
+
+err_code = (FAIL_CODE, -1, -1, -1)
+
+
+class TestDistroChecker(CiTestCase):
+ def test_blank_distro(self):
+ self.assertEqual(err_code, dist_check_timestamp())
+
+ @mock.patch("cloudinit.util.is_FreeBSD", return_value=True)
+ def test_freebsd_gentoo_cant_find(self, m_is_FreeBSD):
+ self.assertEqual(err_code, dist_check_timestamp())
+
+ @mock.patch("cloudinit.subp.subp", return_value=(0, 1))
+ def test_subp_fails(self, m_subp):
+ self.assertEqual(err_code, dist_check_timestamp())
+
+
+class TestSystemCtlReader(CiTestCase):
+ def test_systemctl_invalid_property(self):
+ reader = SystemctlReader("dummyProperty")
+ with self.assertRaises(RuntimeError):
+ reader.parse_epoch_as_float()
+
+ def test_systemctl_invalid_parameter(self):
+ reader = SystemctlReader("dummyProperty", "dummyParameter")
+ with self.assertRaises(RuntimeError):
+ reader.parse_epoch_as_float()
+
+ @mock.patch("cloudinit.subp.subp", return_value=("U=1000000", None))
+ def test_systemctl_works_correctly_threshold(self, m_subp):
+ reader = SystemctlReader("dummyProperty", "dummyParameter")
+ self.assertEqual(1.0, reader.parse_epoch_as_float())
+ thresh = 1.0 - reader.parse_epoch_as_float()
+ self.assertTrue(thresh < 1e-6)
+ self.assertTrue(thresh > (-1 * 1e-6))
+
+ @mock.patch("cloudinit.subp.subp", return_value=("U=0", None))
+ def test_systemctl_succeed_zero(self, m_subp):
+ reader = SystemctlReader("dummyProperty", "dummyParameter")
+ self.assertEqual(0.0, reader.parse_epoch_as_float())
+
+ @mock.patch("cloudinit.subp.subp", return_value=("U=1", None))
+ def test_systemctl_succeed_distinct(self, m_subp):
+ reader = SystemctlReader("dummyProperty", "dummyParameter")
+ val1 = reader.parse_epoch_as_float()
+ m_subp.return_value = ("U=2", None)
+ reader2 = SystemctlReader("dummyProperty", "dummyParameter")
+ val2 = reader2.parse_epoch_as_float()
+ self.assertNotEqual(val1, val2)
+
+ @mock.patch("cloudinit.subp.subp", return_value=("100", None))
+ def test_systemctl_epoch_not_splittable(self, m_subp):
+ reader = SystemctlReader("dummyProperty", "dummyParameter")
+ with self.assertRaises(IndexError):
+ reader.parse_epoch_as_float()
+
+ @mock.patch("cloudinit.subp.subp", return_value=("U=foobar", None))
+ def test_systemctl_cannot_convert_epoch_to_float(self, m_subp):
+ reader = SystemctlReader("dummyProperty", "dummyParameter")
+ with self.assertRaises(ValueError):
+ reader.parse_epoch_as_float()
+
+
+class TestAnalyzeBoot(CiTestCase):
+ def set_up_dummy_file_ci(self, path, log_path):
+ infh = open(path, "w+")
+ infh.write(
+ "2019-07-08 17:40:49,601 - util.py[DEBUG]: Cloud-init v. "
+ "19.1-1-gbaa47854-0ubuntu1~18.04.1 running 'init-local' "
+ "at Mon, 08 Jul 2019 17:40:49 +0000. Up 18.84 seconds."
+ )
+ infh.close()
+ outfh = open(log_path, "w+")
+ outfh.close()
+
+ def set_up_dummy_file(self, path, log_path):
+ infh = open(path, "w+")
+ infh.write("dummy data")
+ infh.close()
+ outfh = open(log_path, "w+")
+ outfh.close()
+
+ def remove_dummy_file(self, path, log_path):
+ if os.path.isfile(path):
+ os.remove(path)
+ if os.path.isfile(log_path):
+ os.remove(log_path)
+
+ @mock.patch(
+ "cloudinit.analyze.show.dist_check_timestamp", return_value=err_code
+ )
+ def test_boot_invalid_distro(self, m_dist_check_timestamp):
+
+ path = os.path.dirname(os.path.abspath(__file__))
+ log_path = path + "/boot-test.log"
+ path += "/dummy.log"
+ self.set_up_dummy_file(path, log_path)
+
+ parser = get_parser()
+ args = parser.parse_args(args=["boot", "-i", path, "-o", log_path])
+ name_default = ""
+ analyze_boot(name_default, args)
+ # now args have been tested, go into outfile and make sure error
+ # message is in the outfile
+ outfh = open(args.outfile, "r")
+ data = outfh.read()
+ err_string = (
+ "Your Linux distro or container does not support this "
+ "functionality.\nYou must be running a Kernel "
+ "Telemetry supported distro.\nPlease check "
+ "https://cloudinit.readthedocs.io/en/latest/topics"
+ "/analyze.html for more information on supported "
+ "distros.\n"
+ )
+
+ self.remove_dummy_file(path, log_path)
+ self.assertEqual(err_string, data)
+
+ @mock.patch("cloudinit.util.is_container", return_value=True)
+ @mock.patch("cloudinit.subp.subp", return_value=("U=1000000", None))
+ def test_container_no_ci_log_line(self, m_is_container, m_subp):
+ path = os.path.dirname(os.path.abspath(__file__))
+ log_path = path + "/boot-test.log"
+ path += "/dummy.log"
+ self.set_up_dummy_file(path, log_path)
+
+ parser = get_parser()
+ args = parser.parse_args(args=["boot", "-i", path, "-o", log_path])
+ name_default = ""
+
+ finish_code = analyze_boot(name_default, args)
+
+ self.remove_dummy_file(path, log_path)
+ self.assertEqual(FAIL_CODE, finish_code)
+
+ @mock.patch("cloudinit.util.is_container", return_value=True)
+ @mock.patch("cloudinit.subp.subp", return_value=("U=1000000", None))
+ @mock.patch(
+ "cloudinit.analyze.__main__._get_events",
+ return_value=[
+ {
+ "name": "init-local",
+ "description": "starting search",
+ "timestamp": 100000,
+ }
+ ],
+ )
+ @mock.patch(
+ "cloudinit.analyze.show.dist_check_timestamp",
+ return_value=(CONTAINER_CODE, 1, 1, 1),
+ )
+ def test_container_ci_log_line(self, m_is_container, m_subp, m_get, m_g):
+ path = os.path.dirname(os.path.abspath(__file__))
+ log_path = path + "/boot-test.log"
+ path += "/dummy.log"
+ self.set_up_dummy_file_ci(path, log_path)
+
+ parser = get_parser()
+ args = parser.parse_args(args=["boot", "-i", path, "-o", log_path])
+ name_default = ""
+ finish_code = analyze_boot(name_default, args)
+
+ self.remove_dummy_file(path, log_path)
+ self.assertEqual(CONTAINER_CODE, finish_code)
diff --git a/tests/unittests/analyze/test_dump.py b/tests/unittests/analyze/test_dump.py
new file mode 100644
index 00000000..56bbf97f
--- /dev/null
+++ b/tests/unittests/analyze/test_dump.py
@@ -0,0 +1,247 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from datetime import datetime
+from textwrap import dedent
+
+from cloudinit.analyze.dump import (
+ dump_events,
+ parse_ci_logline,
+ parse_timestamp,
+)
+from cloudinit.subp import which
+from cloudinit.util import write_file
+from tests.unittests.helpers import CiTestCase, mock, skipIf
+
+
+class TestParseTimestamp(CiTestCase):
+ def test_parse_timestamp_handles_cloud_init_default_format(self):
+ """Logs with cloud-init detailed formats will be properly parsed."""
+ trusty_fmt = "%Y-%m-%d %H:%M:%S,%f"
+ trusty_stamp = "2016-09-12 14:39:20,839"
+ dt = datetime.strptime(trusty_stamp, trusty_fmt)
+ self.assertEqual(
+ float(dt.strftime("%s.%f")), parse_timestamp(trusty_stamp)
+ )
+
+ def test_parse_timestamp_handles_syslog_adding_year(self):
+ """Syslog timestamps lack a year. Add year and properly parse."""
+ syslog_fmt = "%b %d %H:%M:%S %Y"
+ syslog_stamp = "Aug 08 15:12:51"
+
+ # convert stamp ourselves by adding the missing year value
+ year = datetime.now().year
+ dt = datetime.strptime(syslog_stamp + " " + str(year), syslog_fmt)
+ self.assertEqual(
+ float(dt.strftime("%s.%f")), parse_timestamp(syslog_stamp)
+ )
+
+ def test_parse_timestamp_handles_journalctl_format_adding_year(self):
+ """Journalctl precise timestamps lack a year. Add year and parse."""
+ journal_fmt = "%b %d %H:%M:%S.%f %Y"
+ journal_stamp = "Aug 08 17:15:50.606811"
+
+ # convert stamp ourselves by adding the missing year value
+ year = datetime.now().year
+ dt = datetime.strptime(journal_stamp + " " + str(year), journal_fmt)
+ self.assertEqual(
+ float(dt.strftime("%s.%f")), parse_timestamp(journal_stamp)
+ )
+
+ @skipIf(not which("date"), "'date' command not available.")
+ def test_parse_unexpected_timestamp_format_with_date_command(self):
+ """Dump sends unexpected timestamp formats to date for processing."""
+ new_fmt = "%H:%M %m/%d %Y"
+ new_stamp = "17:15 08/08"
+ # convert stamp ourselves by adding the missing year value
+ year = datetime.now().year
+ dt = datetime.strptime(new_stamp + " " + str(year), new_fmt)
+
+ # use date(1)
+ with self.allow_subp(["date"]):
+ self.assertEqual(
+ float(dt.strftime("%s.%f")), parse_timestamp(new_stamp)
+ )
+
+
+class TestParseCILogLine(CiTestCase):
+ def test_parse_logline_returns_none_without_separators(self):
+ """When no separators are found, parse_ci_logline returns None."""
+ expected_parse_ignores = [
+ "",
+ "-",
+ "adsf-asdf",
+ "2017-05-22 18:02:01,088",
+ "CLOUDINIT",
+ ]
+ for parse_ignores in expected_parse_ignores:
+ self.assertIsNone(parse_ci_logline(parse_ignores))
+
+ def test_parse_logline_returns_event_for_cloud_init_logs(self):
+ """parse_ci_logline returns an event parse from cloud-init format."""
+ line = (
+ "2017-08-08 20:05:07,147 - util.py[DEBUG]: Cloud-init v. 0.7.9"
+ " running 'init-local' at Tue, 08 Aug 2017 20:05:07 +0000. Up"
+ " 6.26 seconds."
+ )
+ dt = datetime.strptime(
+ "2017-08-08 20:05:07,147", "%Y-%m-%d %H:%M:%S,%f"
+ )
+ timestamp = float(dt.strftime("%s.%f"))
+ expected = {
+ "description": "starting search for local datasources",
+ "event_type": "start",
+ "name": "init-local",
+ "origin": "cloudinit",
+ "timestamp": timestamp,
+ }
+ self.assertEqual(expected, parse_ci_logline(line))
+
+ def test_parse_logline_returns_event_for_journalctl_logs(self):
+ """parse_ci_logline returns an event parse from journalctl format."""
+ line = (
+ "Nov 03 06:51:06.074410 x2 cloud-init[106]: [CLOUDINIT]"
+ " util.py[DEBUG]: Cloud-init v. 0.7.8 running 'init-local' at"
+ " Thu, 03 Nov 2016 06:51:06 +0000. Up 1.0 seconds."
+ )
+ year = datetime.now().year
+ dt = datetime.strptime(
+ "Nov 03 06:51:06.074410 %d" % year, "%b %d %H:%M:%S.%f %Y"
+ )
+ timestamp = float(dt.strftime("%s.%f"))
+ expected = {
+ "description": "starting search for local datasources",
+ "event_type": "start",
+ "name": "init-local",
+ "origin": "cloudinit",
+ "timestamp": timestamp,
+ }
+ self.assertEqual(expected, parse_ci_logline(line))
+
+ @mock.patch("cloudinit.analyze.dump.parse_timestamp_from_date")
+ def test_parse_logline_returns_event_for_finish_events(
+ self, m_parse_from_date
+ ):
+ """parse_ci_logline returns a finish event for a parsed log line."""
+ line = (
+ "2016-08-30 21:53:25.972325+00:00 y1 [CLOUDINIT]"
+ " handlers.py[DEBUG]: finish: modules-final: SUCCESS: running"
+ " modules for final"
+ )
+ expected = {
+ "description": "running modules for final",
+ "event_type": "finish",
+ "name": "modules-final",
+ "origin": "cloudinit",
+ "result": "SUCCESS",
+ "timestamp": 1472594005.972,
+ }
+ m_parse_from_date.return_value = "1472594005.972"
+ self.assertEqual(expected, parse_ci_logline(line))
+ m_parse_from_date.assert_has_calls(
+ [mock.call("2016-08-30 21:53:25.972325+00:00")]
+ )
+
+ def test_parse_logline_returns_event_for_amazon_linux_2_line(self):
+ line = (
+ "Apr 30 19:39:11 cloud-init[2673]: handlers.py[DEBUG]: start:"
+ " init-local/check-cache: attempting to read from cache [check]"
+ )
+ # Generate the expected value using `datetime`, so that TZ
+ # determination is consistent with the code under test.
+ timestamp_dt = datetime.strptime(
+ "Apr 30 19:39:11", "%b %d %H:%M:%S"
+ ).replace(year=datetime.now().year)
+ expected = {
+ "description": "attempting to read from cache [check]",
+ "event_type": "start",
+ "name": "init-local/check-cache",
+ "origin": "cloudinit",
+ "timestamp": timestamp_dt.timestamp(),
+ }
+ self.assertEqual(expected, parse_ci_logline(line))
+
+
+SAMPLE_LOGS = dedent(
+ """\
+Nov 03 06:51:06.074410 x2 cloud-init[106]: [CLOUDINIT] util.py[DEBUG]:\
+ Cloud-init v. 0.7.8 running 'init-local' at Thu, 03 Nov 2016\
+ 06:51:06 +0000. Up 1.0 seconds.
+2016-08-30 21:53:25.972325+00:00 y1 [CLOUDINIT] handlers.py[DEBUG]: finish:\
+ modules-final: SUCCESS: running modules for final
+"""
+)
+
+
+class TestDumpEvents(CiTestCase):
+ maxDiff = None
+
+ @mock.patch("cloudinit.analyze.dump.parse_timestamp_from_date")
+ def test_dump_events_with_rawdata(self, m_parse_from_date):
+ """Rawdata is split and parsed into a tuple of events and data"""
+ m_parse_from_date.return_value = "1472594005.972"
+ events, data = dump_events(rawdata=SAMPLE_LOGS)
+ expected_data = SAMPLE_LOGS.splitlines()
+ self.assertEqual(
+ [mock.call("2016-08-30 21:53:25.972325+00:00")],
+ m_parse_from_date.call_args_list,
+ )
+ self.assertEqual(expected_data, data)
+ year = datetime.now().year
+ dt1 = datetime.strptime(
+ "Nov 03 06:51:06.074410 %d" % year, "%b %d %H:%M:%S.%f %Y"
+ )
+ timestamp1 = float(dt1.strftime("%s.%f"))
+ expected_events = [
+ {
+ "description": "starting search for local datasources",
+ "event_type": "start",
+ "name": "init-local",
+ "origin": "cloudinit",
+ "timestamp": timestamp1,
+ },
+ {
+ "description": "running modules for final",
+ "event_type": "finish",
+ "name": "modules-final",
+ "origin": "cloudinit",
+ "result": "SUCCESS",
+ "timestamp": 1472594005.972,
+ },
+ ]
+ self.assertEqual(expected_events, events)
+
+ @mock.patch("cloudinit.analyze.dump.parse_timestamp_from_date")
+ def test_dump_events_with_cisource(self, m_parse_from_date):
+ """Cisource file is read and parsed into a tuple of events and data."""
+ tmpfile = self.tmp_path("logfile")
+ write_file(tmpfile, SAMPLE_LOGS)
+ m_parse_from_date.return_value = 1472594005.972
+
+        with open(tmpfile) as cisource:
+            events, data = dump_events(cisource=cisource)
+ year = datetime.now().year
+ dt1 = datetime.strptime(
+ "Nov 03 06:51:06.074410 %d" % year, "%b %d %H:%M:%S.%f %Y"
+ )
+ timestamp1 = float(dt1.strftime("%s.%f"))
+ expected_events = [
+ {
+ "description": "starting search for local datasources",
+ "event_type": "start",
+ "name": "init-local",
+ "origin": "cloudinit",
+ "timestamp": timestamp1,
+ },
+ {
+ "description": "running modules for final",
+ "event_type": "finish",
+ "name": "modules-final",
+ "origin": "cloudinit",
+ "result": "SUCCESS",
+ "timestamp": 1472594005.972,
+ },
+ ]
+ self.assertEqual(expected_events, events)
+ self.assertEqual(SAMPLE_LOGS.splitlines(), [d.strip() for d in data])
+ m_parse_from_date.assert_has_calls(
+ [mock.call("2016-08-30 21:53:25.972325+00:00")]
+ )
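
(A minimal sketch of the dump API pinned down above; the log line and paths
are illustrative only.)

    from cloudinit.analyze.dump import dump_events, parse_timestamp

    raw = (
        "2017-08-08 20:05:07,147 - util.py[DEBUG]: Cloud-init v. 0.7.9"
        " running 'init-local' at Tue, 08 Aug 2017 20:05:07 +0000. Up"
        " 6.26 seconds.\n"
    )
    # dump_events accepts either rawdata=<str> or cisource=<open file>.
    events, _lines = dump_events(rawdata=raw)
    for event in events:
        print(event["timestamp"], event["event_type"], event["name"])

    # cloud-init, syslog and journalctl stamps all parse to an epoch float.
    print(parse_timestamp("2016-09-12 14:39:20,839"))
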
diff --git a/tests/cloud_tests/platforms/azurecloud/__init__.py b/tests/unittests/cloudinit/__init__.py
index e69de29b..e69de29b 100644
--- a/tests/cloud_tests/platforms/azurecloud/__init__.py
+++ b/tests/unittests/cloudinit/__init__.py
diff --git a/tests/cloud_tests/platforms/ec2/__init__.py b/tests/unittests/cmd/__init__.py
index e69de29b..e69de29b 100644
--- a/tests/cloud_tests/platforms/ec2/__init__.py
+++ b/tests/unittests/cmd/__init__.py
diff --git a/tests/cloud_tests/platforms/lxd/__init__.py b/tests/unittests/cmd/devel/__init__.py
index e69de29b..e69de29b 100644
--- a/tests/cloud_tests/platforms/lxd/__init__.py
+++ b/tests/unittests/cmd/devel/__init__.py
diff --git a/tests/unittests/cmd/devel/test_hotplug_hook.py b/tests/unittests/cmd/devel/test_hotplug_hook.py
new file mode 100644
index 00000000..5ecb5969
--- /dev/null
+++ b/tests/unittests/cmd/devel/test_hotplug_hook.py
@@ -0,0 +1,236 @@
+from collections import namedtuple
+from unittest import mock
+from unittest.mock import call
+
+import pytest
+
+from cloudinit.cmd.devel.hotplug_hook import handle_hotplug
+from cloudinit.distros import Distro
+from cloudinit.event import EventType
+from cloudinit.net.activators import NetworkActivator
+from cloudinit.net.network_state import NetworkState
+from cloudinit.sources import DataSource
+from cloudinit.stages import Init
+
+hotplug_args = namedtuple("hotplug_args", "udevaction, subsystem, devpath")
+FAKE_MAC = "11:22:33:44:55:66"
+
+
+@pytest.fixture
+def mocks():
+ m_init = mock.MagicMock(spec=Init)
+ m_distro = mock.MagicMock(spec=Distro)
+ m_datasource = mock.MagicMock(spec=DataSource)
+ m_datasource.distro = m_distro
+ m_init.datasource = m_datasource
+ m_init.fetch.return_value = m_datasource
+
+ read_sys_net = mock.patch(
+ "cloudinit.cmd.devel.hotplug_hook.read_sys_net_safe",
+ return_value=FAKE_MAC,
+ )
+
+ update_event_enabled = mock.patch(
+ "cloudinit.stages.update_event_enabled",
+ return_value=True,
+ )
+
+ m_network_state = mock.MagicMock(spec=NetworkState)
+ parse_net = mock.patch(
+ "cloudinit.cmd.devel.hotplug_hook.parse_net_config_data",
+ return_value=m_network_state,
+ )
+
+ m_activator = mock.MagicMock(spec=NetworkActivator)
+ select_activator = mock.patch(
+ "cloudinit.cmd.devel.hotplug_hook.activators.select_activator",
+ return_value=m_activator,
+ )
+
+ sleep = mock.patch("time.sleep")
+
+ read_sys_net.start()
+ update_event_enabled.start()
+ parse_net.start()
+ select_activator.start()
+ m_sleep = sleep.start()
+
+ yield namedtuple("mocks", "m_init m_network_state m_activator m_sleep")(
+ m_init=m_init,
+ m_network_state=m_network_state,
+ m_activator=m_activator,
+ m_sleep=m_sleep,
+ )
+
+ read_sys_net.stop()
+ update_event_enabled.stop()
+ parse_net.stop()
+ select_activator.stop()
+ sleep.stop()
+
+
+class TestUnsupportedActions:
+ def test_unsupported_subsystem(self, mocks):
+ with pytest.raises(
+ Exception, match="cannot handle events for subsystem: not_real"
+ ):
+ handle_hotplug(
+ hotplug_init=mocks.m_init,
+ devpath="/dev/fake",
+ subsystem="not_real",
+ udevaction="add",
+ )
+
+ def test_unsupported_udevaction(self, mocks):
+ with pytest.raises(ValueError, match="Unknown action: not_real"):
+ handle_hotplug(
+ hotplug_init=mocks.m_init,
+ devpath="/dev/fake",
+ udevaction="not_real",
+ subsystem="net",
+ )
+
+
+class TestHotplug:
+    def test_successful_add(self, mocks):
+ init = mocks.m_init
+ mocks.m_network_state.iter_interfaces.return_value = [
+ {
+ "mac_address": FAKE_MAC,
+ }
+ ]
+ handle_hotplug(
+ hotplug_init=init,
+ devpath="/dev/fake",
+ udevaction="add",
+ subsystem="net",
+ )
+ init.datasource.update_metadata_if_supported.assert_called_once_with(
+ [EventType.HOTPLUG]
+ )
+ mocks.m_activator.bring_up_interface.assert_called_once_with("fake")
+ mocks.m_activator.bring_down_interface.assert_not_called()
+ init._write_to_cache.assert_called_once_with()
+
+ def test_successful_remove(self, mocks):
+ init = mocks.m_init
+ mocks.m_network_state.iter_interfaces.return_value = [{}]
+ handle_hotplug(
+ hotplug_init=init,
+ devpath="/dev/fake",
+ udevaction="remove",
+ subsystem="net",
+ )
+ init.datasource.update_metadata_if_supported.assert_called_once_with(
+ [EventType.HOTPLUG]
+ )
+ mocks.m_activator.bring_down_interface.assert_called_once_with("fake")
+ mocks.m_activator.bring_up_interface.assert_not_called()
+ init._write_to_cache.assert_called_once_with()
+
+ def test_update_event_disabled(self, mocks, caplog):
+ init = mocks.m_init
+ with mock.patch(
+ "cloudinit.stages.update_event_enabled", return_value=False
+ ):
+ handle_hotplug(
+ hotplug_init=init,
+ devpath="/dev/fake",
+ udevaction="remove",
+ subsystem="net",
+ )
+ assert "hotplug not enabled for event of type" in caplog.text
+ init.datasource.update_metadata_if_supported.assert_not_called()
+ mocks.m_activator.bring_up_interface.assert_not_called()
+ mocks.m_activator.bring_down_interface.assert_not_called()
+ init._write_to_cache.assert_not_called()
+
+ def test_update_metadata_failed(self, mocks):
+ mocks.m_init.datasource.update_metadata_if_supported.return_value = (
+ False
+ )
+ with pytest.raises(
+ RuntimeError, match="Datasource .* not updated for event hotplug"
+ ):
+ handle_hotplug(
+ hotplug_init=mocks.m_init,
+ devpath="/dev/fake",
+ udevaction="remove",
+ subsystem="net",
+ )
+
+ def test_detect_hotplugged_device_not_detected_on_add(self, mocks):
+ mocks.m_network_state.iter_interfaces.return_value = [{}]
+ with pytest.raises(
+ RuntimeError,
+ match="Failed to detect {} in updated metadata".format(FAKE_MAC),
+ ):
+ handle_hotplug(
+ hotplug_init=mocks.m_init,
+ devpath="/dev/fake",
+ udevaction="add",
+ subsystem="net",
+ )
+
+ def test_detect_hotplugged_device_detected_on_remove(self, mocks):
+ mocks.m_network_state.iter_interfaces.return_value = [
+ {
+ "mac_address": FAKE_MAC,
+ }
+ ]
+ with pytest.raises(
+ RuntimeError, match="Failed to detect .* in updated metadata"
+ ):
+ handle_hotplug(
+ hotplug_init=mocks.m_init,
+ devpath="/dev/fake",
+ udevaction="remove",
+ subsystem="net",
+ )
+
+ def test_apply_failed_on_add(self, mocks):
+ mocks.m_network_state.iter_interfaces.return_value = [
+ {
+ "mac_address": FAKE_MAC,
+ }
+ ]
+ mocks.m_activator.bring_up_interface.return_value = False
+ with pytest.raises(
+ RuntimeError, match="Failed to bring up device: /dev/fake"
+ ):
+ handle_hotplug(
+ hotplug_init=mocks.m_init,
+ devpath="/dev/fake",
+ udevaction="add",
+ subsystem="net",
+ )
+
+ def test_apply_failed_on_remove(self, mocks):
+ mocks.m_network_state.iter_interfaces.return_value = [{}]
+ mocks.m_activator.bring_down_interface.return_value = False
+ with pytest.raises(
+ RuntimeError, match="Failed to bring down device: /dev/fake"
+ ):
+ handle_hotplug(
+ hotplug_init=mocks.m_init,
+ devpath="/dev/fake",
+ udevaction="remove",
+ subsystem="net",
+ )
+
+ def test_retry(self, mocks):
+ with pytest.raises(RuntimeError):
+ handle_hotplug(
+ hotplug_init=mocks.m_init,
+ devpath="/dev/fake",
+ udevaction="add",
+ subsystem="net",
+ )
+ assert mocks.m_sleep.call_count == 5
+ assert mocks.m_sleep.call_args_list == [
+ call(1),
+ call(3),
+ call(5),
+ call(10),
+ call(30),
+ ]
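
(A minimal sketch of the handle_hotplug entry point tested above, reusing the
tests' own trick of a MagicMock in place of a real cloudinit.stages.Init; the
device path and subsystem value are illustrative.)

    from unittest import mock

    from cloudinit.cmd.devel.hotplug_hook import handle_hotplug
    from cloudinit.stages import Init

    fake_init = mock.MagicMock(spec=Init)
    fake_init.datasource = mock.MagicMock()

    try:
        # Only the "net" subsystem is handled, per TestUnsupportedActions.
        handle_hotplug(
            hotplug_init=fake_init,
            devpath="/dev/fake",
            subsystem="not_real",
            udevaction="add",
        )
    except Exception as err:
        print(err)  # cannot handle events for subsystem: not_real
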
diff --git a/tests/unittests/cmd/devel/test_logs.py b/tests/unittests/cmd/devel/test_logs.py
new file mode 100644
index 00000000..73ed3c65
--- /dev/null
+++ b/tests/unittests/cmd/devel/test_logs.py
@@ -0,0 +1,213 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import os
+from datetime import datetime
+from io import StringIO
+
+from cloudinit.cmd.devel import logs
+from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE
+from cloudinit.subp import subp
+from cloudinit.util import ensure_dir, load_file, write_file
+from tests.unittests.helpers import (
+ FilesystemMockingTestCase,
+ mock,
+ wrap_and_call,
+)
+
+
+@mock.patch("cloudinit.cmd.devel.logs.os.getuid")
+class TestCollectLogs(FilesystemMockingTestCase):
+ def setUp(self):
+ super(TestCollectLogs, self).setUp()
+ self.new_root = self.tmp_dir()
+ self.run_dir = self.tmp_path("run", self.new_root)
+
+ def test_collect_logs_with_userdata_requires_root_user(self, m_getuid):
+ """collect-logs errors when non-root user collects userdata ."""
+ m_getuid.return_value = 100 # non-root
+ output_tarfile = self.tmp_path("logs.tgz")
+ with mock.patch("sys.stderr", new_callable=StringIO) as m_stderr:
+ self.assertEqual(
+ 1, logs.collect_logs(output_tarfile, include_userdata=True)
+ )
+ self.assertEqual(
+ "To include userdata, root user is required."
+ " Try sudo cloud-init collect-logs\n",
+ m_stderr.getvalue(),
+ )
+
+ def test_collect_logs_creates_tarfile(self, m_getuid):
+ """collect-logs creates a tarfile with all related cloud-init info."""
+ m_getuid.return_value = 100
+ log1 = self.tmp_path("cloud-init.log", self.new_root)
+ write_file(log1, "cloud-init-log")
+ log2 = self.tmp_path("cloud-init-output.log", self.new_root)
+ write_file(log2, "cloud-init-output-log")
+ ensure_dir(self.run_dir)
+ write_file(self.tmp_path("results.json", self.run_dir), "results")
+ write_file(
+ self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, self.run_dir),
+ "sensitive",
+ )
+ output_tarfile = self.tmp_path("logs.tgz")
+
+ date = datetime.utcnow().date().strftime("%Y-%m-%d")
+ date_logdir = "cloud-init-logs-{0}".format(date)
+
+ version_out = "/usr/bin/cloud-init 18.2fake\n"
+ expected_subp = {
+ (
+ "dpkg-query",
+ "--show",
+ "-f=${Version}\n",
+ "cloud-init",
+ ): "0.7fake\n",
+ ("cloud-init", "--version"): version_out,
+ ("dmesg",): "dmesg-out\n",
+ ("journalctl", "--boot=0", "-o", "short-precise"): "journal-out\n",
+ ("tar", "czvf", output_tarfile, date_logdir): "",
+ }
+
+ def fake_subp(cmd):
+ cmd_tuple = tuple(cmd)
+ if cmd_tuple not in expected_subp:
+ raise AssertionError(
+ "Unexpected command provided to subp: {0}".format(cmd)
+ )
+ if cmd == ["tar", "czvf", output_tarfile, date_logdir]:
+ subp(cmd) # Pass through tar cmd so we can check output
+ return expected_subp[cmd_tuple], ""
+
+ fake_stderr = mock.MagicMock()
+
+ wrap_and_call(
+ "cloudinit.cmd.devel.logs",
+ {
+ "subp": {"side_effect": fake_subp},
+ "sys.stderr": {"new": fake_stderr},
+ "CLOUDINIT_LOGS": {"new": [log1, log2]},
+ "CLOUDINIT_RUN_DIR": {"new": self.run_dir},
+ },
+ logs.collect_logs,
+ output_tarfile,
+ include_userdata=False,
+ )
+ # unpack the tarfile and check file contents
+ subp(["tar", "zxvf", output_tarfile, "-C", self.new_root])
+ out_logdir = self.tmp_path(date_logdir, self.new_root)
+ self.assertFalse(
+ os.path.exists(
+ os.path.join(
+ out_logdir,
+ "run",
+ "cloud-init",
+ INSTANCE_JSON_SENSITIVE_FILE,
+ )
+ ),
+ "Unexpected file found: %s" % INSTANCE_JSON_SENSITIVE_FILE,
+ )
+ self.assertEqual(
+ "0.7fake\n", load_file(os.path.join(out_logdir, "dpkg-version"))
+ )
+ self.assertEqual(
+ version_out, load_file(os.path.join(out_logdir, "version"))
+ )
+ self.assertEqual(
+ "cloud-init-log",
+ load_file(os.path.join(out_logdir, "cloud-init.log")),
+ )
+ self.assertEqual(
+ "cloud-init-output-log",
+ load_file(os.path.join(out_logdir, "cloud-init-output.log")),
+ )
+ self.assertEqual(
+ "dmesg-out\n", load_file(os.path.join(out_logdir, "dmesg.txt"))
+ )
+ self.assertEqual(
+ "journal-out\n", load_file(os.path.join(out_logdir, "journal.txt"))
+ )
+ self.assertEqual(
+ "results",
+ load_file(
+ os.path.join(out_logdir, "run", "cloud-init", "results.json")
+ ),
+ )
+ fake_stderr.write.assert_any_call("Wrote %s\n" % output_tarfile)
+
+ def test_collect_logs_includes_optional_userdata(self, m_getuid):
+ """collect-logs include userdata when --include-userdata is set."""
+ m_getuid.return_value = 0
+ log1 = self.tmp_path("cloud-init.log", self.new_root)
+ write_file(log1, "cloud-init-log")
+ log2 = self.tmp_path("cloud-init-output.log", self.new_root)
+ write_file(log2, "cloud-init-output-log")
+ userdata = self.tmp_path("user-data.txt", self.new_root)
+ write_file(userdata, "user-data")
+ ensure_dir(self.run_dir)
+ write_file(self.tmp_path("results.json", self.run_dir), "results")
+ write_file(
+ self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, self.run_dir),
+ "sensitive",
+ )
+ output_tarfile = self.tmp_path("logs.tgz")
+
+ date = datetime.utcnow().date().strftime("%Y-%m-%d")
+ date_logdir = "cloud-init-logs-{0}".format(date)
+
+ version_out = "/usr/bin/cloud-init 18.2fake\n"
+ expected_subp = {
+ (
+ "dpkg-query",
+ "--show",
+ "-f=${Version}\n",
+ "cloud-init",
+ ): "0.7fake",
+ ("cloud-init", "--version"): version_out,
+ ("dmesg",): "dmesg-out\n",
+ ("journalctl", "--boot=0", "-o", "short-precise"): "journal-out\n",
+ ("tar", "czvf", output_tarfile, date_logdir): "",
+ }
+
+ def fake_subp(cmd):
+ cmd_tuple = tuple(cmd)
+ if cmd_tuple not in expected_subp:
+ raise AssertionError(
+ "Unexpected command provided to subp: {0}".format(cmd)
+ )
+ if cmd == ["tar", "czvf", output_tarfile, date_logdir]:
+ subp(cmd) # Pass through tar cmd so we can check output
+ return expected_subp[cmd_tuple], ""
+
+ fake_stderr = mock.MagicMock()
+
+ wrap_and_call(
+ "cloudinit.cmd.devel.logs",
+ {
+ "subp": {"side_effect": fake_subp},
+ "sys.stderr": {"new": fake_stderr},
+ "CLOUDINIT_LOGS": {"new": [log1, log2]},
+ "CLOUDINIT_RUN_DIR": {"new": self.run_dir},
+ "USER_DATA_FILE": {"new": userdata},
+ },
+ logs.collect_logs,
+ output_tarfile,
+ include_userdata=True,
+ )
+ # unpack the tarfile and check file contents
+ subp(["tar", "zxvf", output_tarfile, "-C", self.new_root])
+ out_logdir = self.tmp_path(date_logdir, self.new_root)
+ self.assertEqual(
+ "user-data", load_file(os.path.join(out_logdir, "user-data.txt"))
+ )
+ self.assertEqual(
+ "sensitive",
+ load_file(
+ os.path.join(
+ out_logdir,
+ "run",
+ "cloud-init",
+ INSTANCE_JSON_SENSITIVE_FILE,
+ )
+ ),
+ )
+ fake_stderr.write.assert_any_call("Wrote %s\n" % output_tarfile)
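
(A minimal sketch of the collect_logs call wrapped above. As the expected_subp
tables show, it shells out to dpkg-query, cloud-init, dmesg, journalctl and
tar; the output path is illustrative and root is only needed when
include_userdata=True.)

    from cloudinit.cmd.devel import logs

    exit_code = logs.collect_logs(
        "/tmp/cloud-init-logs.tgz", include_userdata=False
    )
    # Returns 1 when userdata is requested without root, per the test above.
    print(exit_code)
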
diff --git a/tests/unittests/cmd/devel/test_render.py b/tests/unittests/cmd/devel/test_render.py
new file mode 100644
index 00000000..4afc64f0
--- /dev/null
+++ b/tests/unittests/cmd/devel/test_render.py
@@ -0,0 +1,154 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import os
+from collections import namedtuple
+from io import StringIO
+
+from cloudinit.cmd.devel import render
+from cloudinit.helpers import Paths
+from cloudinit.sources import INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE
+from cloudinit.util import ensure_dir, write_file
+from tests.unittests.helpers import CiTestCase, mock, skipUnlessJinja
+
+
+class TestRender(CiTestCase):
+
+ with_logs = True
+
+ args = namedtuple("renderargs", "user_data instance_data debug")
+
+ def setUp(self):
+ super(TestRender, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def test_handle_args_error_on_missing_user_data(self):
+ """When user_data file path does not exist, log an error."""
+ absent_file = self.tmp_path("user-data", dir=self.tmp)
+ instance_data = self.tmp_path("instance-data", dir=self.tmp)
+ write_file(instance_data, "{}")
+ args = self.args(
+ user_data=absent_file, instance_data=instance_data, debug=False
+ )
+ with mock.patch("sys.stderr", new_callable=StringIO):
+ self.assertEqual(1, render.handle_args("anyname", args))
+ self.assertIn(
+ "Missing user-data file: %s" % absent_file, self.logs.getvalue()
+ )
+
+ def test_handle_args_error_on_missing_instance_data(self):
+ """When instance_data file path does not exist, log an error."""
+ user_data = self.tmp_path("user-data", dir=self.tmp)
+ absent_file = self.tmp_path("instance-data", dir=self.tmp)
+ args = self.args(
+ user_data=user_data, instance_data=absent_file, debug=False
+ )
+ with mock.patch("sys.stderr", new_callable=StringIO):
+ self.assertEqual(1, render.handle_args("anyname", args))
+ self.assertIn(
+ "Missing instance-data.json file: %s" % absent_file,
+ self.logs.getvalue(),
+ )
+
+ def test_handle_args_defaults_instance_data(self):
+ """When no instance_data argument, default to configured run_dir."""
+ user_data = self.tmp_path("user-data", dir=self.tmp)
+ run_dir = self.tmp_path("run_dir", dir=self.tmp)
+ ensure_dir(run_dir)
+ paths = Paths({"run_dir": run_dir})
+ self.add_patch("cloudinit.cmd.devel.render.read_cfg_paths", "m_paths")
+ self.m_paths.return_value = paths
+ args = self.args(user_data=user_data, instance_data=None, debug=False)
+ with mock.patch("sys.stderr", new_callable=StringIO):
+ self.assertEqual(1, render.handle_args("anyname", args))
+ json_file = os.path.join(run_dir, INSTANCE_JSON_FILE)
+ self.assertIn(
+ "Missing instance-data.json file: %s" % json_file,
+ self.logs.getvalue(),
+ )
+
+ def test_handle_args_root_fallback_from_sensitive_instance_data(self):
+ """When root user defaults to sensitive.json."""
+ user_data = self.tmp_path("user-data", dir=self.tmp)
+ run_dir = self.tmp_path("run_dir", dir=self.tmp)
+ ensure_dir(run_dir)
+ paths = Paths({"run_dir": run_dir})
+ self.add_patch("cloudinit.cmd.devel.render.read_cfg_paths", "m_paths")
+ self.m_paths.return_value = paths
+ args = self.args(user_data=user_data, instance_data=None, debug=False)
+ with mock.patch("sys.stderr", new_callable=StringIO):
+ with mock.patch("os.getuid") as m_getuid:
+ m_getuid.return_value = 0
+ self.assertEqual(1, render.handle_args("anyname", args))
+ json_file = os.path.join(run_dir, INSTANCE_JSON_FILE)
+ json_sensitive = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE)
+ self.assertIn(
+ "WARNING: Missing root-readable %s. Using redacted %s"
+ % (json_sensitive, json_file),
+ self.logs.getvalue(),
+ )
+ self.assertIn(
+ "ERROR: Missing instance-data.json file: %s" % json_file,
+ self.logs.getvalue(),
+ )
+
+ def test_handle_args_root_uses_sensitive_instance_data(self):
+ """When root user, and no instance-data arg, use sensitive.json."""
+ user_data = self.tmp_path("user-data", dir=self.tmp)
+ write_file(user_data, "##template: jinja\nrendering: {{ my_var }}")
+ run_dir = self.tmp_path("run_dir", dir=self.tmp)
+ ensure_dir(run_dir)
+ json_sensitive = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE)
+ write_file(json_sensitive, '{"my-var": "jinja worked"}')
+ paths = Paths({"run_dir": run_dir})
+ self.add_patch("cloudinit.cmd.devel.render.read_cfg_paths", "m_paths")
+ self.m_paths.return_value = paths
+ args = self.args(user_data=user_data, instance_data=None, debug=False)
+ with mock.patch("sys.stderr", new_callable=StringIO):
+ with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
+ with mock.patch("os.getuid") as m_getuid:
+ m_getuid.return_value = 0
+ self.assertEqual(0, render.handle_args("anyname", args))
+ self.assertIn("rendering: jinja worked", m_stdout.getvalue())
+
+ @skipUnlessJinja()
+ def test_handle_args_renders_instance_data_vars_in_template(self):
+ """If user_data file is a jinja template render instance-data vars."""
+ user_data = self.tmp_path("user-data", dir=self.tmp)
+ write_file(user_data, "##template: jinja\nrendering: {{ my_var }}")
+ instance_data = self.tmp_path("instance-data", dir=self.tmp)
+ write_file(instance_data, '{"my-var": "jinja worked"}')
+ args = self.args(
+ user_data=user_data, instance_data=instance_data, debug=True
+ )
+ with mock.patch("sys.stderr", new_callable=StringIO) as m_console_err:
+ with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
+ self.assertEqual(0, render.handle_args("anyname", args))
+ self.assertIn(
+ "DEBUG: Converted jinja variables\n{", self.logs.getvalue()
+ )
+ self.assertIn(
+ "DEBUG: Converted jinja variables\n{", m_console_err.getvalue()
+ )
+ self.assertEqual("rendering: jinja worked", m_stdout.getvalue())
+
+ @skipUnlessJinja()
+ def test_handle_args_warns_and_gives_up_on_invalid_jinja_operation(self):
+ """If user_data file has invalid jinja operations log warnings."""
+ user_data = self.tmp_path("user-data", dir=self.tmp)
+ write_file(user_data, "##template: jinja\nrendering: {{ my-var }}")
+ instance_data = self.tmp_path("instance-data", dir=self.tmp)
+ write_file(instance_data, '{"my-var": "jinja worked"}')
+ args = self.args(
+ user_data=user_data, instance_data=instance_data, debug=True
+ )
+ with mock.patch("sys.stderr", new_callable=StringIO):
+ self.assertEqual(1, render.handle_args("anyname", args))
+ self.assertIn(
+ "WARNING: Ignoring jinja template for %s: Undefined jinja"
+ ' variable: "my-var". Jinja tried subtraction. Perhaps you meant'
+ ' "my_var"?' % user_data,
+ self.logs.getvalue(),
+ )
+
+
+# vi: ts=4 expandtab
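
(A minimal sketch of render.handle_args, built the same way the tests build
their namedtuple args; both paths are illustrative and the user-data file is
expected to start with the "##template: jinja" header used above.)

    from collections import namedtuple

    from cloudinit.cmd.devel import render

    Args = namedtuple("renderargs", "user_data instance_data debug")
    args = Args(
        user_data="/tmp/user-data",
        instance_data="/run/cloud-init/instance-data.json",
        debug=False,
    )
    # Prints the rendered user-data and returns 0; returns 1 on missing files
    # or invalid jinja, as asserted above.
    exit_code = render.handle_args("render", args)
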
diff --git a/tests/unittests/cmd/test_clean.py b/tests/unittests/cmd/test_clean.py
new file mode 100644
index 00000000..7d12017e
--- /dev/null
+++ b/tests/unittests/cmd/test_clean.py
@@ -0,0 +1,211 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import os
+from collections import namedtuple
+from io import StringIO
+
+from cloudinit.cmd import clean
+from cloudinit.util import ensure_dir, sym_link, write_file
+from tests.unittests.helpers import CiTestCase, mock, wrap_and_call
+
+mypaths = namedtuple("MyPaths", "cloud_dir")
+
+
+class TestClean(CiTestCase):
+ def setUp(self):
+ super(TestClean, self).setUp()
+ self.new_root = self.tmp_dir()
+ self.artifact_dir = self.tmp_path("artifacts", self.new_root)
+ self.log1 = self.tmp_path("cloud-init.log", self.new_root)
+ self.log2 = self.tmp_path("cloud-init-output.log", self.new_root)
+
+ class FakeInit(object):
+ cfg = {
+ "def_log_file": self.log1,
+ "output": {"all": "|tee -a {0}".format(self.log2)},
+ }
+ # Ensure cloud_dir has a trailing slash, to match real behaviour
+ paths = mypaths(cloud_dir="{}/".format(self.artifact_dir))
+
+ def __init__(self, ds_deps):
+ pass
+
+ def read_cfg(self):
+ pass
+
+ self.init_class = FakeInit
+
+ def test_remove_artifacts_removes_logs(self):
+ """remove_artifacts removes logs when remove_logs is True."""
+ write_file(self.log1, "cloud-init-log")
+ write_file(self.log2, "cloud-init-output-log")
+
+ self.assertFalse(
+ os.path.exists(self.artifact_dir), "Unexpected artifacts dir"
+ )
+ retcode = wrap_and_call(
+ "cloudinit.cmd.clean",
+ {"Init": {"side_effect": self.init_class}},
+ clean.remove_artifacts,
+ remove_logs=True,
+ )
+ self.assertFalse(os.path.exists(self.log1), "Unexpected file")
+ self.assertFalse(os.path.exists(self.log2), "Unexpected file")
+ self.assertEqual(0, retcode)
+
+ def test_remove_artifacts_preserves_logs(self):
+ """remove_artifacts leaves logs when remove_logs is False."""
+ write_file(self.log1, "cloud-init-log")
+ write_file(self.log2, "cloud-init-output-log")
+
+ retcode = wrap_and_call(
+ "cloudinit.cmd.clean",
+ {"Init": {"side_effect": self.init_class}},
+ clean.remove_artifacts,
+ remove_logs=False,
+ )
+ self.assertTrue(os.path.exists(self.log1), "Missing expected file")
+ self.assertTrue(os.path.exists(self.log2), "Missing expected file")
+ self.assertEqual(0, retcode)
+
+ def test_remove_artifacts_removes_unlinks_symlinks(self):
+ """remove_artifacts cleans artifacts dir unlinking any symlinks."""
+ dir1 = os.path.join(self.artifact_dir, "dir1")
+ ensure_dir(dir1)
+ symlink = os.path.join(self.artifact_dir, "mylink")
+ sym_link(dir1, symlink)
+
+ retcode = wrap_and_call(
+ "cloudinit.cmd.clean",
+ {"Init": {"side_effect": self.init_class}},
+ clean.remove_artifacts,
+ remove_logs=False,
+ )
+ self.assertEqual(0, retcode)
+ for path in (dir1, symlink):
+ self.assertFalse(
+ os.path.exists(path), "Unexpected {0} dir".format(path)
+ )
+
+ def test_remove_artifacts_removes_artifacts_skipping_seed(self):
+ """remove_artifacts cleans artifacts dir with exception of seed dir."""
+ dirs = [
+ self.artifact_dir,
+ os.path.join(self.artifact_dir, "seed"),
+ os.path.join(self.artifact_dir, "dir1"),
+ os.path.join(self.artifact_dir, "dir2"),
+ ]
+ for _dir in dirs:
+ ensure_dir(_dir)
+
+ retcode = wrap_and_call(
+ "cloudinit.cmd.clean",
+ {"Init": {"side_effect": self.init_class}},
+ clean.remove_artifacts,
+ remove_logs=False,
+ )
+ self.assertEqual(0, retcode)
+ for expected_dir in dirs[:2]:
+ self.assertTrue(
+ os.path.exists(expected_dir),
+ "Missing {0} dir".format(expected_dir),
+ )
+ for deleted_dir in dirs[2:]:
+ self.assertFalse(
+ os.path.exists(deleted_dir),
+ "Unexpected {0} dir".format(deleted_dir),
+ )
+
+ def test_remove_artifacts_removes_artifacts_removes_seed(self):
+ """remove_artifacts removes seed dir when remove_seed is True."""
+ dirs = [
+ self.artifact_dir,
+ os.path.join(self.artifact_dir, "seed"),
+ os.path.join(self.artifact_dir, "dir1"),
+ os.path.join(self.artifact_dir, "dir2"),
+ ]
+ for _dir in dirs:
+ ensure_dir(_dir)
+
+ retcode = wrap_and_call(
+ "cloudinit.cmd.clean",
+ {"Init": {"side_effect": self.init_class}},
+ clean.remove_artifacts,
+ remove_logs=False,
+ remove_seed=True,
+ )
+ self.assertEqual(0, retcode)
+ self.assertTrue(
+ os.path.exists(self.artifact_dir), "Missing artifact dir"
+ )
+ for deleted_dir in dirs[1:]:
+ self.assertFalse(
+ os.path.exists(deleted_dir),
+ "Unexpected {0} dir".format(deleted_dir),
+ )
+
+ def test_remove_artifacts_returns_one_on_errors(self):
+ """remove_artifacts returns non-zero on failure and prints an error."""
+ ensure_dir(self.artifact_dir)
+ ensure_dir(os.path.join(self.artifact_dir, "dir1"))
+
+ with mock.patch("sys.stderr", new_callable=StringIO) as m_stderr:
+ retcode = wrap_and_call(
+ "cloudinit.cmd.clean",
+ {
+ "del_dir": {"side_effect": OSError("oops")},
+ "Init": {"side_effect": self.init_class},
+ },
+ clean.remove_artifacts,
+ remove_logs=False,
+ )
+ self.assertEqual(1, retcode)
+ self.assertEqual(
+ "Error:\nCould not remove %s/dir1: oops\n" % self.artifact_dir,
+ m_stderr.getvalue(),
+ )
+
+ def test_handle_clean_args_reboots(self):
+ """handle_clean_args_reboots when reboot arg is provided."""
+
+ called_cmds = []
+
+ def fake_subp(cmd, capture):
+ called_cmds.append((cmd, capture))
+ return "", ""
+
+ myargs = namedtuple("MyArgs", "remove_logs remove_seed reboot")
+ cmdargs = myargs(remove_logs=False, remove_seed=False, reboot=True)
+ retcode = wrap_and_call(
+ "cloudinit.cmd.clean",
+ {
+ "subp": {"side_effect": fake_subp},
+ "Init": {"side_effect": self.init_class},
+ },
+ clean.handle_clean_args,
+ name="does not matter",
+ args=cmdargs,
+ )
+ self.assertEqual(0, retcode)
+ self.assertEqual([(["shutdown", "-r", "now"], False)], called_cmds)
+
+ def test_status_main(self):
+ """clean.main can be run as a standalone script."""
+ write_file(self.log1, "cloud-init-log")
+ with self.assertRaises(SystemExit) as context_manager:
+ wrap_and_call(
+ "cloudinit.cmd.clean",
+ {
+ "Init": {"side_effect": self.init_class},
+ "sys.argv": {"new": ["clean", "--logs"]},
+ },
+ clean.main,
+ )
+
+ self.assertEqual(0, context_manager.exception.code)
+ self.assertFalse(
+ os.path.exists(self.log1), "Unexpected log {0}".format(self.log1)
+ )
+
+
+# vi: ts=4 expandtab syntax=python
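
(A minimal sketch of clean.remove_artifacts, mirroring the FakeInit trick
above so the call operates on a throw-away directory rather than the real
/var/lib/cloud; everything below is illustrative.)

    import os
    import tempfile
    from collections import namedtuple
    from unittest import mock

    from cloudinit.cmd import clean

    tmp = tempfile.mkdtemp()
    os.makedirs(os.path.join(tmp, "seed"))
    os.makedirs(os.path.join(tmp, "instances"))

    MyPaths = namedtuple("MyPaths", "cloud_dir")


    class FakeInit:
        cfg = {"def_log_file": "", "output": {"all": ""}}
        paths = MyPaths(cloud_dir=tmp + "/")

        def __init__(self, ds_deps):
            pass

        def read_cfg(self):
            pass


    with mock.patch("cloudinit.cmd.clean.Init", side_effect=FakeInit):
        ret = clean.remove_artifacts(remove_logs=False, remove_seed=False)
    print(ret, sorted(os.listdir(tmp)))  # 0 ['seed']: seed kept by default
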
diff --git a/tests/unittests/cmd/test_cloud_id.py b/tests/unittests/cmd/test_cloud_id.py
new file mode 100644
index 00000000..907297a6
--- /dev/null
+++ b/tests/unittests/cmd/test_cloud_id.py
@@ -0,0 +1,187 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Tests for cloud-id command line utility."""
+
+from collections import namedtuple
+
+import pytest
+
+from cloudinit import util
+from cloudinit.cmd import cloud_id
+from tests.unittests.helpers import mock
+
+M_PATH = "cloudinit.cmd.cloud_id."
+
+
+class TestCloudId:
+
+ args = namedtuple("cloudidargs", "instance_data json long")
+
+ def test_cloud_id_arg_parser_defaults(self):
+ """Validate the argument defaults when not provided by the end-user."""
+ cmd = ["cloud-id"]
+ with mock.patch("sys.argv", cmd):
+ args = cloud_id.get_parser().parse_args()
+ assert "/run/cloud-init/instance-data.json" == args.instance_data
+ assert False is args.long
+ assert False is args.json
+
+ def test_cloud_id_arg_parse_overrides(self, tmpdir):
+ """Override argument defaults by specifying values for each param."""
+ instance_data = tmpdir.join("instance-data.json")
+ instance_data.write("{}")
+ cmd = [
+ "cloud-id",
+ "--instance-data",
+ instance_data.strpath,
+ "--long",
+ "--json",
+ ]
+ with mock.patch("sys.argv", cmd):
+ args = cloud_id.get_parser().parse_args()
+ assert instance_data.strpath == args.instance_data
+ assert True is args.long
+ assert True is args.json
+
+ @mock.patch(M_PATH + "get_status_details")
+ def test_cloud_id_missing_instance_data_json(
+ self, get_status_details, tmpdir, capsys
+ ):
+ """Exit error when the provided instance-data.json does not exist."""
+ get_status_details.return_value = cloud_id.UXAppStatus.DONE, "n/a", ""
+ instance_data = tmpdir.join("instance-data.json")
+ cmd = ["cloud-id", "--instance-data", instance_data.strpath]
+ with mock.patch("sys.argv", cmd):
+ with pytest.raises(SystemExit) as context_manager:
+ cloud_id.main()
+ assert 1 == context_manager.value.code
+ _out, err = capsys.readouterr()
+ assert "Error:\nFile not found '%s'" % instance_data.strpath in err
+
+ @mock.patch(M_PATH + "get_status_details")
+ def test_cloud_id_non_json_instance_data(
+ self, get_status_details, tmpdir, capsys
+ ):
+ """Exit error when the provided instance-data.json is not json."""
+ get_status_details.return_value = cloud_id.UXAppStatus.DONE, "n/a", ""
+ instance_data = tmpdir.join("instance-data.json")
+ cmd = ["cloud-id", "--instance-data", instance_data.strpath]
+ instance_data.write("{")
+ with mock.patch("sys.argv", cmd):
+ with pytest.raises(SystemExit) as context_manager:
+ cloud_id.main()
+ assert 1 == context_manager.value.code
+ _out, err = capsys.readouterr()
+ assert (
+ "Error:\nFile '%s' is not valid json." % instance_data.strpath
+ in err
+ )
+
+ @mock.patch(M_PATH + "get_status_details")
+ def test_cloud_id_from_cloud_name_in_instance_data(
+ self, get_status_details, tmpdir, capsys
+ ):
+ """Report canonical cloud-id from cloud_name in instance-data."""
+ instance_data = tmpdir.join("instance-data.json")
+ get_status_details.return_value = cloud_id.UXAppStatus.DONE, "n/a", ""
+ instance_data.write(
+ '{"v1": {"cloud_name": "mycloud", "region": "somereg"}}',
+ )
+ cmd = ["cloud-id", "--instance-data", instance_data.strpath]
+ with mock.patch("sys.argv", cmd):
+ with pytest.raises(SystemExit) as context_manager:
+ cloud_id.main()
+ assert 0 == context_manager.value.code
+ out, _err = capsys.readouterr()
+ assert "mycloud\n" == out
+
+ @mock.patch(M_PATH + "get_status_details")
+ def test_cloud_id_long_name_from_instance_data(
+ self, get_status_details, tmpdir, capsys
+ ):
+ """Report long cloud-id format from cloud_name and region."""
+ get_status_details.return_value = cloud_id.UXAppStatus.DONE, "n/a", ""
+ instance_data = tmpdir.join("instance-data.json")
+ instance_data.write(
+ '{"v1": {"cloud_name": "mycloud", "region": "somereg"}}',
+ )
+ cmd = ["cloud-id", "--instance-data", instance_data.strpath, "--long"]
+ with mock.patch("sys.argv", cmd):
+ with pytest.raises(SystemExit) as context_manager:
+ cloud_id.main()
+ out, _err = capsys.readouterr()
+ assert 0 == context_manager.value.code
+ assert "mycloud\tsomereg\n" == out
+
+ @mock.patch(M_PATH + "get_status_details")
+ def test_cloud_id_lookup_from_instance_data_region(
+ self, get_status_details, tmpdir, capsys
+ ):
+ """Report discovered canonical cloud_id when region lookup matches."""
+ get_status_details.return_value = cloud_id.UXAppStatus.DONE, "n/a", ""
+ instance_data = tmpdir.join("instance-data.json")
+ instance_data.write(
+ '{"v1": {"cloud_name": "aws", "region": "cn-north-1",'
+ ' "platform": "ec2"}}',
+ )
+ cmd = ["cloud-id", "--instance-data", instance_data.strpath, "--long"]
+ with mock.patch("sys.argv", cmd):
+ with pytest.raises(SystemExit) as context_manager:
+ cloud_id.main()
+ assert 0 == context_manager.value.code
+ out, _err = capsys.readouterr()
+ assert "aws-china\tcn-north-1\n" == out
+
+ @mock.patch(M_PATH + "get_status_details")
+ def test_cloud_id_lookup_json_instance_data_adds_cloud_id_to_json(
+ self, get_status_details, tmpdir, capsys
+ ):
+ """Report v1 instance-data content with cloud_id when --json set."""
+ get_status_details.return_value = cloud_id.UXAppStatus.DONE, "n/a", ""
+ instance_data = tmpdir.join("instance-data.json")
+ instance_data.write(
+ '{"v1": {"cloud_name": "unknown", "region": "dfw",'
+ ' "platform": "openstack", "public_ssh_keys": []}}',
+ )
+ expected = util.json_dumps(
+ {
+ "cloud_id": "openstack",
+ "cloud_name": "unknown",
+ "platform": "openstack",
+ "public_ssh_keys": [],
+ "region": "dfw",
+ }
+ )
+ cmd = ["cloud-id", "--instance-data", instance_data.strpath, "--json"]
+ with mock.patch("sys.argv", cmd):
+ with pytest.raises(SystemExit) as context_manager:
+ cloud_id.main()
+ out, _err = capsys.readouterr()
+ assert 0 == context_manager.value.code
+ assert expected + "\n" == out
+
+ @pytest.mark.parametrize(
+ "status, exit_code",
+ (
+ (cloud_id.UXAppStatus.DISABLED, 2),
+ (cloud_id.UXAppStatus.NOT_RUN, 3),
+ (cloud_id.UXAppStatus.RUNNING, 0),
+ ),
+ )
+ @mock.patch(M_PATH + "get_status_details")
+ def test_cloud_id_unique_exit_codes_for_status(
+ self, get_status_details, status, exit_code, tmpdir, capsys
+ ):
+ """cloud-id returns unique exit codes for status."""
+ get_status_details.return_value = status, "n/a", ""
+ instance_data = tmpdir.join("instance-data.json")
+ if status == cloud_id.UXAppStatus.RUNNING:
+ instance_data.write("{}")
+ cmd = ["cloud-id", "--instance-data", instance_data.strpath, "--json"]
+ with mock.patch("sys.argv", cmd):
+ with pytest.raises(SystemExit) as context_manager:
+ cloud_id.main()
+ assert exit_code == context_manager.value.code
+
+
+# vi: ts=4 expandtab
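
(A minimal sketch of driving cloud-id the way the tests do: patch sys.argv
and catch the SystemExit raised by main(); instance-data defaults to
/run/cloud-init/instance-data.json per the parser test above.)

    from unittest import mock

    from cloudinit.cmd import cloud_id

    with mock.patch("sys.argv", ["cloud-id", "--long"]):
        try:
            cloud_id.main()
        except SystemExit as e:
            # 0 when done/running, 1 on bad or missing instance-data,
            # 2 when cloud-init is disabled, 3 when it has not run.
            print("exit code:", e.code)
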
diff --git a/tests/unittests/cmd/test_main.py b/tests/unittests/cmd/test_main.py
new file mode 100644
index 00000000..3e778b0b
--- /dev/null
+++ b/tests/unittests/cmd/test_main.py
@@ -0,0 +1,241 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import copy
+import os
+from collections import namedtuple
+from io import StringIO
+from unittest import mock
+
+import pytest
+
+from cloudinit import safeyaml
+from cloudinit.cmd import main
+from cloudinit.util import ensure_dir, load_file, write_file
+from tests.unittests.helpers import FilesystemMockingTestCase, wrap_and_call
+
+mypaths = namedtuple("MyPaths", "run_dir")
+myargs = namedtuple("MyArgs", "debug files force local reporter subcommand")
+
+
+class TestMain(FilesystemMockingTestCase):
+ with_logs = True
+ allowed_subp = False
+
+ def setUp(self):
+ super(TestMain, self).setUp()
+ self.new_root = self.tmp_dir()
+ self.cloud_dir = self.tmp_path("var/lib/cloud/", dir=self.new_root)
+ os.makedirs(self.cloud_dir)
+ self.replicateTestRoot("simple_ubuntu", self.new_root)
+ self.cfg = {
+ "datasource_list": ["None"],
+ "runcmd": ["ls /etc"], # test ALL_DISTROS
+ "system_info": {
+ "paths": {
+ "cloud_dir": self.cloud_dir,
+ "run_dir": self.new_root,
+ }
+ },
+ "write_files": [
+ {
+ "path": "/etc/blah.ini",
+ "content": "blah",
+ "permissions": 0o755,
+ },
+ ],
+ "cloud_init_modules": ["write-files", "runcmd"],
+ }
+ cloud_cfg = safeyaml.dumps(self.cfg)
+ ensure_dir(os.path.join(self.new_root, "etc", "cloud"))
+ self.cloud_cfg_file = os.path.join(
+ self.new_root, "etc", "cloud", "cloud.cfg"
+ )
+ write_file(self.cloud_cfg_file, cloud_cfg)
+ self.patchOS(self.new_root)
+ self.patchUtils(self.new_root)
+ self.stderr = StringIO()
+ self.patchStdoutAndStderr(stderr=self.stderr)
+
+ def test_main_init_run_net_stops_on_file_no_net(self):
+ """When no-net file is present, main_init does not process modules."""
+ stop_file = os.path.join(self.cloud_dir, "data", "no-net") # stop file
+ write_file(stop_file, "")
+ cmdargs = myargs(
+ debug=False,
+ files=None,
+ force=False,
+ local=False,
+ reporter=None,
+ subcommand="init",
+ )
+ (_item1, item2) = wrap_and_call(
+ "cloudinit.cmd.main",
+ {
+ "util.close_stdin": True,
+ "netinfo.debug_info": "my net debug info",
+ "util.fixup_output": ("outfmt", "errfmt"),
+ },
+ main.main_init,
+ "init",
+ cmdargs,
+ )
+ # We should not run write_files module
+ self.assertFalse(
+ os.path.exists(os.path.join(self.new_root, "etc/blah.ini")),
+ "Unexpected run of write_files module produced blah.ini",
+ )
+ self.assertEqual([], item2)
+        # Instancify is not called, so no instance-id file is written
+ instance_id_path = "var/lib/cloud/data/instance-id"
+ self.assertFalse(
+ os.path.exists(os.path.join(self.new_root, instance_id_path)),
+ "Unexpected call to datasource.instancify produced instance-id",
+ )
+ expected_logs = [
+ "Exiting. stop file ['{stop_file}'] existed\n".format(
+ stop_file=stop_file
+ ),
+ "my net debug info", # netinfo.debug_info
+ ]
+ for log in expected_logs:
+ self.assertIn(log, self.stderr.getvalue())
+
+ def test_main_init_run_net_runs_modules(self):
+ """Modules like write_files are run in 'net' mode."""
+ cmdargs = myargs(
+ debug=False,
+ files=None,
+ force=False,
+ local=False,
+ reporter=None,
+ subcommand="init",
+ )
+ (_item1, item2) = wrap_and_call(
+ "cloudinit.cmd.main",
+ {
+ "util.close_stdin": True,
+ "netinfo.debug_info": "my net debug info",
+ "util.fixup_output": ("outfmt", "errfmt"),
+ },
+ main.main_init,
+ "init",
+ cmdargs,
+ )
+ self.assertEqual([], item2)
+ # Instancify is called
+ instance_id_path = "var/lib/cloud/data/instance-id"
+ self.assertEqual(
+ "iid-datasource-none\n",
+            load_file(os.path.join(self.new_root, instance_id_path)),
+ )
+ # modules are run (including write_files)
+ self.assertEqual(
+ "blah", load_file(os.path.join(self.new_root, "etc/blah.ini"))
+ )
+ expected_logs = [
+ "network config is disabled by fallback", # apply_network_config
+ "my net debug info", # netinfo.debug_info
+ "no previous run detected",
+ ]
+ for log in expected_logs:
+ self.assertIn(log, self.stderr.getvalue())
+
+ def test_main_init_run_net_calls_set_hostname_when_metadata_present(self):
+ """When local-hostname metadata is present, call cc_set_hostname."""
+ self.cfg["datasource"] = {
+ "None": {"metadata": {"local-hostname": "md-hostname"}}
+ }
+ cloud_cfg = safeyaml.dumps(self.cfg)
+ write_file(self.cloud_cfg_file, cloud_cfg)
+ cmdargs = myargs(
+ debug=False,
+ files=None,
+ force=False,
+ local=False,
+ reporter=None,
+ subcommand="init",
+ )
+
+ def set_hostname(name, cfg, cloud, log, args):
+ self.assertEqual("set-hostname", name)
+ updated_cfg = copy.deepcopy(self.cfg)
+ updated_cfg.update(
+ {
+ "def_log_file": "/var/log/cloud-init.log",
+ "log_cfgs": [],
+ "syslog_fix_perms": [
+ "syslog:adm",
+ "root:adm",
+ "root:wheel",
+ "root:root",
+ ],
+ "vendor_data": {"enabled": True, "prefix": []},
+ "vendor_data2": {"enabled": True, "prefix": []},
+ }
+ )
+ updated_cfg.pop("system_info")
+
+ self.assertEqual(updated_cfg, cfg)
+ self.assertEqual(main.LOG, log)
+ self.assertIsNone(args)
+
+ (_item1, item2) = wrap_and_call(
+ "cloudinit.cmd.main",
+ {
+ "util.close_stdin": True,
+ "netinfo.debug_info": "my net debug info",
+ "cc_set_hostname.handle": {"side_effect": set_hostname},
+ "util.fixup_output": ("outfmt", "errfmt"),
+ },
+ main.main_init,
+ "init",
+ cmdargs,
+ )
+ self.assertEqual([], item2)
+ # Instancify is called
+ instance_id_path = "var/lib/cloud/data/instance-id"
+ self.assertEqual(
+ "iid-datasource-none\n",
+            load_file(os.path.join(self.new_root, instance_id_path)),
+ )
+ # modules are run (including write_files)
+ self.assertEqual(
+ "blah", load_file(os.path.join(self.new_root, "etc/blah.ini"))
+ )
+ expected_logs = [
+ "network config is disabled by fallback", # apply_network_config
+ "my net debug info", # netinfo.debug_info
+ "no previous run detected",
+ ]
+ for log in expected_logs:
+ self.assertIn(log, self.stderr.getvalue())
+
+
+class TestShouldBringUpInterfaces:
+ @pytest.mark.parametrize(
+ "cfg_disable,args_local,expected",
+ [
+ (True, True, False),
+ (True, False, False),
+ (False, True, False),
+ (False, False, True),
+ ],
+ )
+ def test_should_bring_up_interfaces(
+ self, cfg_disable, args_local, expected
+ ):
+ init = mock.Mock()
+ init.cfg = {"disable_network_activation": cfg_disable}
+
+ args = mock.Mock()
+ args.local = args_local
+
+ result = main._should_bring_up_interfaces(init, args)
+ assert result == expected
+
+
+# vi: ts=4 expandtab
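
(A minimal sketch of the helper checked by TestShouldBringUpInterfaces, using
the same mock shapes as the test: True only when network activation is not
disabled and the stage is not 'local'.)

    from unittest import mock

    from cloudinit.cmd import main

    init = mock.Mock()
    init.cfg = {"disable_network_activation": False}
    args = mock.Mock()
    args.local = False

    print(main._should_bring_up_interfaces(init, args))  # True
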
diff --git a/tests/unittests/cmd/test_query.py b/tests/unittests/cmd/test_query.py
new file mode 100644
index 00000000..03a73bb5
--- /dev/null
+++ b/tests/unittests/cmd/test_query.py
@@ -0,0 +1,537 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import errno
+import gzip
+import json
+import os
+from collections import namedtuple
+from io import BytesIO
+from textwrap import dedent
+
+import pytest
+
+from cloudinit.cmd import query
+from cloudinit.helpers import Paths
+from cloudinit.sources import (
+ INSTANCE_JSON_FILE,
+ INSTANCE_JSON_SENSITIVE_FILE,
+ REDACT_SENSITIVE_VALUE,
+)
+from cloudinit.util import b64e, write_file
+from tests.unittests.helpers import mock
+
+
+def _gzip_data(data):
+ with BytesIO() as iobuf:
+ with gzip.GzipFile(mode="wb", fileobj=iobuf) as gzfp:
+ gzfp.write(data)
+ return iobuf.getvalue()
+
+
+@mock.patch("cloudinit.cmd.query.addLogHandlerCLI", lambda *args: "")
+class TestQuery:
+
+ args = namedtuple(
+ "queryargs",
+ "debug dump_all format instance_data list_keys user_data vendor_data"
+ " varname",
+ )
+
+ def _setup_paths(self, tmpdir, ud_val=None, vd_val=None):
+ """Write userdata and vendordata into a tmpdir.
+
+ Return:
+ 4-tuple : (paths, run_dir_path, userdata_path, vendordata_path)
+ """
+ if ud_val:
+ user_data = tmpdir.join("user-data")
+ write_file(user_data.strpath, ud_val)
+ else:
+ user_data = None
+ if vd_val:
+ vendor_data = tmpdir.join("vendor-data")
+ write_file(vendor_data.strpath, vd_val)
+ else:
+ vendor_data = None
+ run_dir = tmpdir.join("run_dir")
+ run_dir.ensure_dir()
+
+ cloud_dir = tmpdir.join("cloud_dir")
+ cloud_dir.ensure_dir()
+
+ return (
+ Paths(
+ {"cloud_dir": cloud_dir.strpath, "run_dir": run_dir.strpath}
+ ),
+ run_dir,
+ user_data,
+ vendor_data,
+ )
+
+ def test_handle_args_error_on_missing_param(self, caplog, capsys):
+ """Error when missing required parameters and print usage."""
+ args = self.args(
+ debug=False,
+ dump_all=False,
+ format=None,
+ instance_data=None,
+ list_keys=False,
+ user_data=None,
+ vendor_data=None,
+ varname=None,
+ )
+ with mock.patch(
+ "cloudinit.cmd.query.addLogHandlerCLI", return_value=""
+ ) as m_cli_log:
+ assert 1 == query.handle_args("anyname", args)
+ expected_error = (
+ "Expected one of the options: --all, --format, --list-keys"
+ " or varname\n"
+ )
+ assert expected_error in caplog.text
+ out, _err = capsys.readouterr()
+ assert "usage: query" in out
+ assert 1 == m_cli_log.call_count
+
+ @pytest.mark.parametrize(
+ "inst_data,varname,expected_error",
+ (
+ (
+ '{"v1": {"key-2": "value-2"}}',
+ "v1.absent_leaf",
+ "instance-data 'v1' has no 'absent_leaf'\n",
+ ),
+ (
+ '{"v1": {"key-2": "value-2"}}',
+ "absent_key",
+ "Undefined instance-data key 'absent_key'\n",
+ ),
+ ),
+ )
+    def test_handle_args_error_on_invalid_varname_paths(
+ self, inst_data, varname, expected_error, caplog, tmpdir
+ ):
+ """Error when varname is not a valid instance-data variable path."""
+ instance_data = tmpdir.join("instance-data")
+ instance_data.write(inst_data)
+ args = self.args(
+ debug=False,
+ dump_all=False,
+ format=None,
+ instance_data=instance_data.strpath,
+ list_keys=False,
+ user_data=None,
+ vendor_data=None,
+ varname=varname,
+ )
+ paths, _, _, _ = self._setup_paths(tmpdir)
+ with mock.patch("cloudinit.cmd.query.read_cfg_paths") as m_paths:
+ m_paths.return_value = paths
+ with mock.patch(
+ "cloudinit.cmd.query.addLogHandlerCLI", return_value=""
+ ):
+ with mock.patch("cloudinit.cmd.query.load_userdata") as m_lud:
+ m_lud.return_value = "ud"
+ assert 1 == query.handle_args("anyname", args)
+ assert expected_error in caplog.text
+
+ def test_handle_args_error_on_missing_instance_data(self, caplog, tmpdir):
+ """When instance_data file path does not exist, log an error."""
+ absent_fn = tmpdir.join("absent")
+ args = self.args(
+ debug=False,
+ dump_all=True,
+ format=None,
+ instance_data=absent_fn.strpath,
+ list_keys=False,
+ user_data="ud",
+ vendor_data="vd",
+ varname=None,
+ )
+ assert 1 == query.handle_args("anyname", args)
+
+ msg = "Missing instance-data file: %s" % absent_fn
+ assert msg in caplog.text
+
+ def test_handle_args_error_when_no_read_permission_instance_data(
+ self, caplog, tmpdir
+ ):
+ """When instance_data file is unreadable, log an error."""
+ noread_fn = tmpdir.join("unreadable")
+ noread_fn.write("thou shall not pass")
+ args = self.args(
+ debug=False,
+ dump_all=True,
+ format=None,
+ instance_data=noread_fn.strpath,
+ list_keys=False,
+ user_data="ud",
+ vendor_data="vd",
+ varname=None,
+ )
+ with mock.patch("cloudinit.cmd.query.util.load_file") as m_load:
+ m_load.side_effect = OSError(errno.EACCES, "Not allowed")
+ assert 1 == query.handle_args("anyname", args)
+ msg = "No read permission on '%s'. Try sudo" % noread_fn
+ assert msg in caplog.text
+
+ def test_handle_args_defaults_instance_data(self, caplog, tmpdir):
+ """When no instance_data argument, default to configured run_dir."""
+ args = self.args(
+ debug=False,
+ dump_all=True,
+ format=None,
+ instance_data=None,
+ list_keys=False,
+ user_data=None,
+ vendor_data=None,
+ varname=None,
+ )
+ paths, run_dir, _, _ = self._setup_paths(tmpdir)
+ with mock.patch("cloudinit.cmd.query.read_cfg_paths") as m_paths:
+ m_paths.return_value = paths
+ assert 1 == query.handle_args("anyname", args)
+ json_file = run_dir.join(INSTANCE_JSON_FILE)
+ msg = "Missing instance-data file: %s" % json_file.strpath
+ assert msg in caplog.text
+
+ def test_handle_args_root_fallsback_to_instance_data(self, caplog, tmpdir):
+ """When no instance_data argument, root falls back to redacted json."""
+ args = self.args(
+ debug=False,
+ dump_all=True,
+ format=None,
+ instance_data=None,
+ list_keys=False,
+ user_data=None,
+ vendor_data=None,
+ varname=None,
+ )
+ paths, run_dir, _, _ = self._setup_paths(tmpdir)
+ with mock.patch("cloudinit.cmd.query.read_cfg_paths") as m_paths:
+ m_paths.return_value = paths
+ with mock.patch("os.getuid") as m_getuid:
+ m_getuid.return_value = 0
+ assert 1 == query.handle_args("anyname", args)
+ json_file = run_dir.join(INSTANCE_JSON_FILE)
+ sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE)
+ msg = "Missing root-readable %s. Using redacted %s instead." % (
+ sensitive_file.strpath,
+ json_file.strpath,
+ )
+ assert msg in caplog.text
+
+ @pytest.mark.parametrize(
+ "ud_src,ud_expected,vd_src,vd_expected",
+ (
+ ("hi mom", "hi mom", "hi pops", "hi pops"),
+ ("ud".encode("utf-8"), "ud", "vd".encode("utf-8"), "vd"),
+ (_gzip_data(b"ud"), "ud", _gzip_data(b"vd"), "vd"),
+ (_gzip_data("ud".encode("utf-8")), "ud", _gzip_data(b"vd"), "vd"),
+ ),
+ )
+ def test_handle_args_root_processes_user_data(
+ self, ud_src, ud_expected, vd_src, vd_expected, capsys, tmpdir
+ ):
+ """Support reading multiple user-data file content types"""
+ paths, run_dir, user_data, vendor_data = self._setup_paths(
+ tmpdir, ud_val=ud_src, vd_val=vd_src
+ )
+ sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE)
+ sensitive_file.write('{"my-var": "it worked"}')
+ args = self.args(
+ debug=False,
+ dump_all=True,
+ format=None,
+ instance_data=None,
+ list_keys=False,
+ user_data=user_data.strpath,
+ vendor_data=vendor_data.strpath,
+ varname=None,
+ )
+ with mock.patch("cloudinit.cmd.query.read_cfg_paths") as m_paths:
+ m_paths.return_value = paths
+ with mock.patch("os.getuid") as m_getuid:
+ m_getuid.return_value = 0
+ assert 0 == query.handle_args("anyname", args)
+ out, _err = capsys.readouterr()
+ cmd_output = json.loads(out)
+ assert "it worked" == cmd_output["my-var"]
+ if ud_expected == "ci-b64:":
+ ud_expected = "ci-b64:{}".format(b64e(ud_src))
+ if vd_expected == "ci-b64:":
+ vd_expected = "ci-b64:{}".format(b64e(vd_src))
+ assert ud_expected == cmd_output["userdata"]
+ assert vd_expected == cmd_output["vendordata"]
+
+ def test_handle_args_user_vendor_data_defaults_to_instance_link(
+ self, capsys, tmpdir
+ ):
+ """When no instance_data argument, root uses sensitive json."""
+ paths, run_dir, _, _ = self._setup_paths(tmpdir)
+ sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE)
+ sensitive_file.write('{"my-var": "it worked"}')
+
+ ud_path = os.path.join(paths.instance_link, "user-data.txt")
+ write_file(ud_path, "instance_link_ud")
+ vd_path = os.path.join(paths.instance_link, "vendor-data.txt")
+ write_file(vd_path, "instance_link_vd")
+
+ args = self.args(
+ debug=False,
+ dump_all=True,
+ format=None,
+ instance_data=None,
+ list_keys=False,
+ user_data=None,
+ vendor_data=None,
+ varname=None,
+ )
+ with mock.patch("cloudinit.cmd.query.read_cfg_paths") as m_paths:
+ m_paths.return_value = paths
+ with mock.patch("os.getuid", return_value=0):
+ assert 0 == query.handle_args("anyname", args)
+ expected = (
+ '{\n "my-var": "it worked",\n '
+ '"userdata": "instance_link_ud",\n '
+ '"vendordata": "instance_link_vd"\n}\n'
+ )
+ out, _ = capsys.readouterr()
+ assert expected == out
+
+ def test_handle_args_root_uses_instance_sensitive_data(
+ self, capsys, tmpdir
+ ):
+ """When no instance_data argument, root uses sensitive json."""
+ paths, run_dir, user_data, vendor_data = self._setup_paths(
+ tmpdir, ud_val="ud", vd_val="vd"
+ )
+ sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE)
+ sensitive_file.write('{"my-var": "it worked"}')
+ args = self.args(
+ debug=False,
+ dump_all=True,
+ format=None,
+ instance_data=None,
+ list_keys=False,
+ user_data=user_data.strpath,
+ vendor_data=vendor_data.strpath,
+ varname=None,
+ )
+ with mock.patch("cloudinit.cmd.query.read_cfg_paths") as m_paths:
+ m_paths.return_value = paths
+ with mock.patch("os.getuid") as m_getuid:
+ m_getuid.return_value = 0
+ assert 0 == query.handle_args("anyname", args)
+ expected = (
+ '{\n "my-var": "it worked",\n '
+ '"userdata": "ud",\n "vendordata": "vd"\n}\n'
+ )
+ out, _err = capsys.readouterr()
+ assert expected == out
+
+ def test_handle_args_dumps_all_instance_data(self, capsys, tmpdir):
+ """When --all is specified query will dump all instance data vars."""
+ instance_data = tmpdir.join("instance-data")
+ instance_data.write('{"my-var": "it worked"}')
+ args = self.args(
+ debug=False,
+ dump_all=True,
+ format=None,
+ instance_data=instance_data.strpath,
+ list_keys=False,
+ user_data="ud",
+ vendor_data="vd",
+ varname=None,
+ )
+ with mock.patch("os.getuid") as m_getuid:
+ m_getuid.return_value = 100
+ assert 0 == query.handle_args("anyname", args)
+ expected = (
+ '{\n "my-var": "it worked",\n "userdata": "<%s> file:ud",\n'
+ ' "vendordata": "<%s> file:vd"\n}\n'
+ % (REDACT_SENSITIVE_VALUE, REDACT_SENSITIVE_VALUE)
+ )
+ out, _err = capsys.readouterr()
+ assert expected == out
+
+ def test_handle_args_returns_top_level_varname(self, capsys, tmpdir):
+ """When the argument varname is passed, report its value."""
+ instance_data = tmpdir.join("instance-data")
+ instance_data.write('{"my-var": "it worked"}')
+ args = self.args(
+ debug=False,
+ dump_all=True,
+ format=None,
+ instance_data=instance_data.strpath,
+ list_keys=False,
+ user_data="ud",
+ vendor_data="vd",
+ varname="my_var",
+ )
+ with mock.patch("os.getuid") as m_getuid:
+ m_getuid.return_value = 100
+ assert 0 == query.handle_args("anyname", args)
+ out, _err = capsys.readouterr()
+ assert "it worked\n" == out
+
+ @pytest.mark.parametrize(
+ "inst_data,varname,expected",
+ (
+ (
+ '{"v1": {"key-2": "value-2"}, "my-var": "it worked"}',
+ "v1.key_2",
+ "value-2\n",
+ ),
+ # Assert no jinja underscore-delimited aliases are reported on CLI
+ (
+ '{"v1": {"something-hyphenated": {"no.underscores":"x",'
+ ' "no-alias": "y"}}, "my-var": "it worked"}',
+ "v1.something_hyphenated",
+ '{\n "no-alias": "y",\n "no.underscores": "x"\n}\n',
+ ),
+ ),
+ )
+ def test_handle_args_returns_nested_varname(
+ self, inst_data, varname, expected, capsys, tmpdir
+ ):
+ """If user_data file is a jinja template render instance-data vars."""
+ instance_data = tmpdir.join("instance-data")
+ instance_data.write(inst_data)
+ args = self.args(
+ debug=False,
+ dump_all=False,
+ format=None,
+ instance_data=instance_data.strpath,
+ user_data="ud",
+ vendor_data="vd",
+ list_keys=False,
+ varname=varname,
+ )
+ with mock.patch("os.getuid") as m_getuid:
+ m_getuid.return_value = 100
+ assert 0 == query.handle_args("anyname", args)
+ out, _err = capsys.readouterr()
+ assert expected == out
+
+ def test_handle_args_returns_standardized_vars_to_top_level_aliases(
+ self, capsys, tmpdir
+ ):
+ """Any standardized vars under v# are promoted as top-level aliases."""
+ instance_data = tmpdir.join("instance-data")
+ instance_data.write(
+ '{"v1": {"v1_1": "val1.1"}, "v2": {"v2_2": "val2.2"},'
+ ' "top": "gun"}'
+ )
+ expected = dedent(
+ """\
+ {
+ "top": "gun",
+ "userdata": "<redacted for non-root user> file:ud",
+ "v1": {
+ "v1_1": "val1.1"
+ },
+ "v1_1": "val1.1",
+ "v2": {
+ "v2_2": "val2.2"
+ },
+ "v2_2": "val2.2",
+ "vendordata": "<redacted for non-root user> file:vd"
+ }
+ """
+ )
+ args = self.args(
+ debug=False,
+ dump_all=True,
+ format=None,
+ instance_data=instance_data.strpath,
+ user_data="ud",
+ vendor_data="vd",
+ list_keys=False,
+ varname=None,
+ )
+ with mock.patch("os.getuid") as m_getuid:
+ m_getuid.return_value = 100
+ assert 0 == query.handle_args("anyname", args)
+ out, _err = capsys.readouterr()
+ assert expected == out
+
+ def test_handle_args_list_keys_sorts_top_level_keys_when_no_varname(
+ self, capsys, tmpdir
+ ):
+ """Sort all top-level keys when only --list-keys provided."""
+ instance_data = tmpdir.join("instance-data")
+ instance_data.write(
+ '{"v1": {"v1_1": "val1.1"}, "v2": {"v2_2": "val2.2"},'
+ ' "top": "gun"}'
+ )
+ expected = "top\nuserdata\nv1\nv1_1\nv2\nv2_2\nvendordata\n"
+ args = self.args(
+ debug=False,
+ dump_all=False,
+ format=None,
+ instance_data=instance_data.strpath,
+ list_keys=True,
+ user_data="ud",
+ vendor_data="vd",
+ varname=None,
+ )
+ with mock.patch("os.getuid") as m_getuid:
+ m_getuid.return_value = 100
+ assert 0 == query.handle_args("anyname", args)
+ out, _err = capsys.readouterr()
+ assert expected == out
+
+ def test_handle_args_list_keys_sorts_nested_keys_when_varname(
+ self, capsys, tmpdir
+ ):
+ """Sort all nested keys of varname object when --list-keys provided."""
+ instance_data = tmpdir.join("instance-data")
+ instance_data.write(
+ '{"v1": {"v1_1": "val1.1", "v1_2": "val1.2"}, "v2":'
+ + ' {"v2_2": "val2.2"}, "top": "gun"}'
+ )
+ expected = "v1_1\nv1_2\n"
+ args = self.args(
+ debug=False,
+ dump_all=False,
+ format=None,
+ instance_data=instance_data.strpath,
+ list_keys=True,
+ user_data="ud",
+ vendor_data="vd",
+ varname="v1",
+ )
+ with mock.patch("os.getuid") as m_getuid:
+ m_getuid.return_value = 100
+ assert 0 == query.handle_args("anyname", args)
+ out, _err = capsys.readouterr()
+ assert expected == out
+
+ def test_handle_args_list_keys_errors_when_varname_is_not_a_dict(
+ self, caplog, tmpdir
+ ):
+ """Raise an error when --list-keys and varname specify a non-list."""
+ instance_data = tmpdir.join("instance-data")
+ instance_data.write(
+ '{"v1": {"v1_1": "val1.1", "v1_2": "val1.2"}, "v2": '
+ + '{"v2_2": "val2.2"}, "top": "gun"}'
+ )
+ expected_error = "--list-keys provided but 'top' is not a dict"
+ args = self.args(
+ debug=False,
+ dump_all=False,
+ format=None,
+ instance_data=instance_data.strpath,
+ list_keys=True,
+ user_data="ud",
+ vendor_data="vd",
+ varname="top",
+ )
+ with mock.patch("os.getuid") as m_getuid:
+ m_getuid.return_value = 100
+ assert 1 == query.handle_args("anyname", args)
+ assert expected_error in caplog.text
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/cmd/test_status.py b/tests/unittests/cmd/test_status.py
new file mode 100644
index 00000000..c5f424da
--- /dev/null
+++ b/tests/unittests/cmd/test_status.py
@@ -0,0 +1,548 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import os
+from collections import namedtuple
+from io import StringIO
+from textwrap import dedent
+
+from cloudinit.atomic_helper import write_json
+from cloudinit.cmd import status
+from cloudinit.util import ensure_file
+from tests.unittests.helpers import CiTestCase, mock, wrap_and_call
+
+mypaths = namedtuple("MyPaths", "run_dir")
+myargs = namedtuple("MyArgs", "long wait")
+
+
+class TestStatus(CiTestCase):
+ def setUp(self):
+ super(TestStatus, self).setUp()
+ self.new_root = self.tmp_dir()
+ self.status_file = self.tmp_path("status.json", self.new_root)
+ self.disable_file = self.tmp_path("cloudinit-disable", self.new_root)
+ self.paths = mypaths(run_dir=self.new_root)
+
+ class FakeInit(object):
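+ # Minimal stand-in for cloudinit.stages.Init; only exposes run_dir paths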
+ paths = self.paths
+
+ def __init__(self, ds_deps):
+ pass
+
+ def read_cfg(self):
+ pass
+
+ self.init_class = FakeInit
+
+ def test__is_cloudinit_disabled_false_on_sysvinit(self):
+ """When not in an environment using systemd, return False."""
+ ensure_file(self.disable_file) # Create the ignored disable file
+ (is_disabled, reason) = wrap_and_call(
+ "cloudinit.cmd.status",
+ {
+ "uses_systemd": False,
+ "get_cmdline": "root=/dev/my-root not-important",
+ },
+ status._is_cloudinit_disabled,
+ self.disable_file,
+ self.paths,
+ )
+ self.assertFalse(
+ is_disabled, "expected enabled cloud-init on sysvinit"
+ )
+ self.assertEqual("Cloud-init enabled on sysvinit", reason)
+
+ def test__is_cloudinit_disabled_true_on_disable_file(self):
+ """When using systemd and disable_file is present return disabled."""
+ ensure_file(self.disable_file) # Create observed disable file
+ (is_disabled, reason) = wrap_and_call(
+ "cloudinit.cmd.status",
+ {
+ "uses_systemd": True,
+ "get_cmdline": "root=/dev/my-root not-important",
+ },
+ status._is_cloudinit_disabled,
+ self.disable_file,
+ self.paths,
+ )
+ self.assertTrue(is_disabled, "expected disabled cloud-init")
+ self.assertEqual(
+ "Cloud-init disabled by {0}".format(self.disable_file), reason
+ )
+
+ def test__is_cloudinit_disabled_false_on_kernel_cmdline_enable(self):
+ """Not disabled when using systemd and enabled via commandline."""
+ ensure_file(self.disable_file) # Create ignored disable file
+ (is_disabled, reason) = wrap_and_call(
+ "cloudinit.cmd.status",
+ {
+ "uses_systemd": True,
+ "get_cmdline": "something cloud-init=enabled else",
+ },
+ status._is_cloudinit_disabled,
+ self.disable_file,
+ self.paths,
+ )
+ self.assertFalse(is_disabled, "expected enabled cloud-init")
+ self.assertEqual(
+ "Cloud-init enabled by kernel command line cloud-init=enabled",
+ reason,
+ )
+
+ def test__is_cloudinit_disabled_true_on_kernel_cmdline(self):
+ """When kernel command line disables cloud-init return True."""
+ (is_disabled, reason) = wrap_and_call(
+ "cloudinit.cmd.status",
+ {
+ "uses_systemd": True,
+ "get_cmdline": "something cloud-init=disabled else",
+ },
+ status._is_cloudinit_disabled,
+ self.disable_file,
+ self.paths,
+ )
+ self.assertTrue(is_disabled, "expected disabled cloud-init")
+ self.assertEqual(
+ "Cloud-init disabled by kernel parameter cloud-init=disabled",
+ reason,
+ )
+
+ def test__is_cloudinit_disabled_true_when_generator_disables(self):
+ """When cloud-init-generator writes disabled file return True."""
+ disabled_file = os.path.join(self.paths.run_dir, "disabled")
+ ensure_file(disabled_file)
+ (is_disabled, reason) = wrap_and_call(
+ "cloudinit.cmd.status",
+ {"uses_systemd": True, "get_cmdline": "something"},
+ status._is_cloudinit_disabled,
+ self.disable_file,
+ self.paths,
+ )
+ self.assertTrue(is_disabled, "expected disabled cloud-init")
+ self.assertEqual("Cloud-init disabled by cloud-init-generator", reason)
+
+ def test__is_cloudinit_disabled_false_when_enabled_in_systemd(self):
+ """Report enabled when systemd generator creates the enabled file."""
+ enabled_file = os.path.join(self.paths.run_dir, "enabled")
+ ensure_file(enabled_file)
+ (is_disabled, reason) = wrap_and_call(
+ "cloudinit.cmd.status",
+ {"uses_systemd": True, "get_cmdline": "something ignored"},
+ status._is_cloudinit_disabled,
+ self.disable_file,
+ self.paths,
+ )
+ self.assertFalse(is_disabled, "expected enabled cloud-init")
+ self.assertEqual(
+ "Cloud-init enabled by systemd cloud-init-generator", reason
+ )
+
+ def test_status_returns_not_run(self):
+ """When status.json does not exist yet, return 'not run'."""
+ self.assertFalse(
+ os.path.exists(self.status_file), "Unexpected status.json found"
+ )
+ cmdargs = myargs(long=False, wait=False)
+ with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
+ retcode = wrap_and_call(
+ "cloudinit.cmd.status",
+ {
+ "_is_cloudinit_disabled": (False, ""),
+ "Init": {"side_effect": self.init_class},
+ },
+ status.handle_status_args,
+ "ignored",
+ cmdargs,
+ )
+ self.assertEqual(0, retcode)
+ self.assertEqual("status: not run\n", m_stdout.getvalue())
+
+ def test_status_returns_disabled_long_on_presence_of_disable_file(self):
+ """When cloudinit is disabled, return disabled reason."""
+
+ checked_files = []
+
+ def fakeexists(filepath):
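+ # Track which paths are checked and report status.json as absent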
+ checked_files.append(filepath)
+ status_file = os.path.join(self.paths.run_dir, "status.json")
+ return filepath != status_file
+
+ cmdargs = myargs(long=True, wait=False)
+ with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
+ retcode = wrap_and_call(
+ "cloudinit.cmd.status",
+ {
+ "os.path.exists": {"side_effect": fakeexists},
+ "_is_cloudinit_disabled": (
+ True,
+ "disabled for some reason",
+ ),
+ "Init": {"side_effect": self.init_class},
+ },
+ status.handle_status_args,
+ "ignored",
+ cmdargs,
+ )
+ self.assertEqual(0, retcode)
+ self.assertEqual(
+ [os.path.join(self.paths.run_dir, "status.json")], checked_files
+ )
+ expected = dedent(
+ """\
+ status: disabled
+ detail:
+ disabled for some reason
+ """
+ )
+ self.assertEqual(expected, m_stdout.getvalue())
+
+ def test_status_returns_running_on_no_results_json(self):
+ """Report running when status.json exists but result.json does not."""
+ result_file = self.tmp_path("result.json", self.new_root)
+ write_json(self.status_file, {})
+ self.assertFalse(
+ os.path.exists(result_file), "Unexpected result.json found"
+ )
+ cmdargs = myargs(long=False, wait=False)
+ with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
+ retcode = wrap_and_call(
+ "cloudinit.cmd.status",
+ {
+ "_is_cloudinit_disabled": (False, ""),
+ "Init": {"side_effect": self.init_class},
+ },
+ status.handle_status_args,
+ "ignored",
+ cmdargs,
+ )
+ self.assertEqual(0, retcode)
+ self.assertEqual("status: running\n", m_stdout.getvalue())
+
+ def test_status_returns_running(self):
+ """Report running when status exists with an unfinished stage."""
+ ensure_file(self.tmp_path("result.json", self.new_root))
+ write_json(
+ self.status_file, {"v1": {"init": {"start": 1, "finished": None}}}
+ )
+ cmdargs = myargs(long=False, wait=False)
+ with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
+ retcode = wrap_and_call(
+ "cloudinit.cmd.status",
+ {
+ "_is_cloudinit_disabled": (False, ""),
+ "Init": {"side_effect": self.init_class},
+ },
+ status.handle_status_args,
+ "ignored",
+ cmdargs,
+ )
+ self.assertEqual(0, retcode)
+ self.assertEqual("status: running\n", m_stdout.getvalue())
+
+ def test_status_returns_done(self):
+ """Report done results.json exists no stages are unfinished."""
+ ensure_file(self.tmp_path("result.json", self.new_root))
+ write_json(
+ self.status_file,
+ {
+ "v1": {
+ "stage": None, # No current stage running
+ "datasource": (
+ "DataSourceNoCloud [seed=/var/.../seed/nocloud-net]"
+ "[dsmode=net]"
+ ),
+ "blah": {"finished": 123.456},
+ "init": {
+ "errors": [],
+ "start": 124.567,
+ "finished": 125.678,
+ },
+ "init-local": {"start": 123.45, "finished": 123.46},
+ }
+ },
+ )
+ cmdargs = myargs(long=False, wait=False)
+ with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
+ retcode = wrap_and_call(
+ "cloudinit.cmd.status",
+ {
+ "_is_cloudinit_disabled": (False, ""),
+ "Init": {"side_effect": self.init_class},
+ },
+ status.handle_status_args,
+ "ignored",
+ cmdargs,
+ )
+ self.assertEqual(0, retcode)
+ self.assertEqual("status: done\n", m_stdout.getvalue())
+
+ def test_status_returns_done_long(self):
+ """Long format of done status includes datasource info."""
+ ensure_file(self.tmp_path("result.json", self.new_root))
+ write_json(
+ self.status_file,
+ {
+ "v1": {
+ "stage": None,
+ "datasource": (
+ "DataSourceNoCloud [seed=/var/.../seed/nocloud-net]"
+ "[dsmode=net]"
+ ),
+ "init": {"start": 124.567, "finished": 125.678},
+ "init-local": {"start": 123.45, "finished": 123.46},
+ }
+ },
+ )
+ cmdargs = myargs(long=True, wait=False)
+ with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
+ retcode = wrap_and_call(
+ "cloudinit.cmd.status",
+ {
+ "_is_cloudinit_disabled": (False, ""),
+ "Init": {"side_effect": self.init_class},
+ },
+ status.handle_status_args,
+ "ignored",
+ cmdargs,
+ )
+ self.assertEqual(0, retcode)
+ expected = dedent(
+ """\
+ status: done
+ time: Thu, 01 Jan 1970 00:02:05 +0000
+ detail:
+ DataSourceNoCloud [seed=/var/.../seed/nocloud-net][dsmode=net]
+ """
+ )
+ self.assertEqual(expected, m_stdout.getvalue())
+
+ def test_status_on_errors(self):
+ """Reports error when any stage has errors."""
+ write_json(
+ self.status_file,
+ {
+ "v1": {
+ "stage": None,
+ "blah": {"errors": [], "finished": 123.456},
+ "init": {
+ "errors": ["error1"],
+ "start": 124.567,
+ "finished": 125.678,
+ },
+ "init-local": {"start": 123.45, "finished": 123.46},
+ }
+ },
+ )
+ cmdargs = myargs(long=False, wait=False)
+ with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
+ retcode = wrap_and_call(
+ "cloudinit.cmd.status",
+ {
+ "_is_cloudinit_disabled": (False, ""),
+ "Init": {"side_effect": self.init_class},
+ },
+ status.handle_status_args,
+ "ignored",
+ cmdargs,
+ )
+ self.assertEqual(1, retcode)
+ self.assertEqual("status: error\n", m_stdout.getvalue())
+
+ def test_status_on_errors_long(self):
+ """Long format of error status includes all error messages."""
+ write_json(
+ self.status_file,
+ {
+ "v1": {
+ "stage": None,
+ "datasource": (
+ "DataSourceNoCloud [seed=/var/.../seed/nocloud-net]"
+ "[dsmode=net]"
+ ),
+ "init": {
+ "errors": ["error1"],
+ "start": 124.567,
+ "finished": 125.678,
+ },
+ "init-local": {
+ "errors": ["error2", "error3"],
+ "start": 123.45,
+ "finished": 123.46,
+ },
+ }
+ },
+ )
+ cmdargs = myargs(long=True, wait=False)
+ with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
+ retcode = wrap_and_call(
+ "cloudinit.cmd.status",
+ {
+ "_is_cloudinit_disabled": (False, ""),
+ "Init": {"side_effect": self.init_class},
+ },
+ status.handle_status_args,
+ "ignored",
+ cmdargs,
+ )
+ self.assertEqual(1, retcode)
+ expected = dedent(
+ """\
+ status: error
+ time: Thu, 01 Jan 1970 00:02:05 +0000
+ detail:
+ error1
+ error2
+ error3
+ """
+ )
+ self.assertEqual(expected, m_stdout.getvalue())
+
+ def test_status_returns_running_long_format(self):
+ """Long format reports the stage in which we are running."""
+ write_json(
+ self.status_file,
+ {
+ "v1": {
+ "stage": "init",
+ "init": {"start": 124.456, "finished": None},
+ "init-local": {"start": 123.45, "finished": 123.46},
+ }
+ },
+ )
+ cmdargs = myargs(long=True, wait=False)
+ with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
+ retcode = wrap_and_call(
+ "cloudinit.cmd.status",
+ {
+ "_is_cloudinit_disabled": (False, ""),
+ "Init": {"side_effect": self.init_class},
+ },
+ status.handle_status_args,
+ "ignored",
+ cmdargs,
+ )
+ self.assertEqual(0, retcode)
+ expected = dedent(
+ """\
+ status: running
+ time: Thu, 01 Jan 1970 00:02:04 +0000
+ detail:
+ Running in stage: init
+ """
+ )
+ self.assertEqual(expected, m_stdout.getvalue())
+
+ def test_status_wait_blocks_until_done(self):
+ """Specifying wait will poll every 1/4 second until done state."""
+ running_json = {
+ "v1": {
+ "stage": "init",
+ "init": {"start": 124.456, "finished": None},
+ "init-local": {"start": 123.45, "finished": 123.46},
+ }
+ }
+ done_json = {
+ "v1": {
+ "stage": None,
+ "init": {"start": 124.456, "finished": 125.678},
+ "init-local": {"start": 123.45, "finished": 123.46},
+ }
+ }
+
+ self.sleep_calls = 0
+
+ def fake_sleep(interval):
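+ # Each poll advances the fake boot: write running, then done plus result.json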
+ self.assertEqual(0.25, interval)
+ self.sleep_calls += 1
+ if self.sleep_calls == 2:
+ write_json(self.status_file, running_json)
+ elif self.sleep_calls == 3:
+ write_json(self.status_file, done_json)
+ result_file = self.tmp_path("result.json", self.new_root)
+ ensure_file(result_file)
+
+ cmdargs = myargs(long=False, wait=True)
+ with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
+ retcode = wrap_and_call(
+ "cloudinit.cmd.status",
+ {
+ "sleep": {"side_effect": fake_sleep},
+ "_is_cloudinit_disabled": (False, ""),
+ "Init": {"side_effect": self.init_class},
+ },
+ status.handle_status_args,
+ "ignored",
+ cmdargs,
+ )
+ self.assertEqual(0, retcode)
+ self.assertEqual(4, self.sleep_calls)
+ self.assertEqual("....\nstatus: done\n", m_stdout.getvalue())
+
+ def test_status_wait_blocks_until_error(self):
+ """Specifying wait will poll every 1/4 second until error state."""
+ running_json = {
+ "v1": {
+ "stage": "init",
+ "init": {"start": 124.456, "finished": None},
+ "init-local": {"start": 123.45, "finished": 123.46},
+ }
+ }
+ error_json = {
+ "v1": {
+ "stage": None,
+ "init": {
+ "errors": ["error1"],
+ "start": 124.456,
+ "finished": 125.678,
+ },
+ "init-local": {"start": 123.45, "finished": 123.46},
+ }
+ }
+
+ self.sleep_calls = 0
+
+ def fake_sleep(interval):
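+ # Each poll advances the fake boot: write running, then the error status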
+ self.assertEqual(0.25, interval)
+ self.sleep_calls += 1
+ if self.sleep_calls == 2:
+ write_json(self.status_file, running_json)
+ elif self.sleep_calls == 3:
+ write_json(self.status_file, error_json)
+
+ cmdargs = myargs(long=False, wait=True)
+ with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
+ retcode = wrap_and_call(
+ "cloudinit.cmd.status",
+ {
+ "sleep": {"side_effect": fake_sleep},
+ "_is_cloudinit_disabled": (False, ""),
+ "Init": {"side_effect": self.init_class},
+ },
+ status.handle_status_args,
+ "ignored",
+ cmdargs,
+ )
+ self.assertEqual(1, retcode)
+ self.assertEqual(4, self.sleep_calls)
+ self.assertEqual("....\nstatus: error\n", m_stdout.getvalue())
+
+ def test_status_main(self):
+ """status.main can be run as a standalone script."""
+ write_json(
+ self.status_file, {"v1": {"init": {"start": 1, "finished": None}}}
+ )
+ with self.assertRaises(SystemExit) as context_manager:
+ with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
+ wrap_and_call(
+ "cloudinit.cmd.status",
+ {
+ "sys.argv": {"new": ["status"]},
+ "_is_cloudinit_disabled": (False, ""),
+ "Init": {"side_effect": self.init_class},
+ },
+ status.main,
+ )
+ self.assertEqual(0, context_manager.exception.code)
+ self.assertEqual("status: running\n", m_stdout.getvalue())
+
+
+# vi: ts=4 expandtab syntax=python
diff --git a/tests/cloud_tests/platforms/nocloudkvm/__init__.py b/tests/unittests/config/__init__.py
index e69de29b..e69de29b 100644
--- a/tests/cloud_tests/platforms/nocloudkvm/__init__.py
+++ b/tests/unittests/config/__init__.py
diff --git a/tests/unittests/test_handler/test_handler_apt_conf_v1.py b/tests/unittests/config/test_apt_conf_v1.py
index 6a4b03ee..5a75cf0a 100644
--- a/tests/unittests/test_handler/test_handler_apt_conf_v1.py
+++ b/tests/unittests/config/test_apt_conf_v1.py
@@ -1,16 +1,15 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.config import cc_apt_configure
-from cloudinit import util
-
-from cloudinit.tests.helpers import TestCase
-
import copy
import os
import re
import shutil
import tempfile
+from cloudinit import util
+from cloudinit.config import cc_apt_configure
+from tests.unittests.helpers import TestCase
+
class TestAptProxyConfig(TestCase):
def setUp(self):
@@ -23,10 +22,12 @@ class TestAptProxyConfig(TestCase):
def _search_apt_config(self, contents, ptype, value):
return re.search(
r"acquire::%s::proxy\s+[\"']%s[\"'];\n" % (ptype, value),
- contents, flags=re.IGNORECASE)
+ contents,
+ flags=re.IGNORECASE,
+ )
def test_apt_proxy_written(self):
- cfg = {'proxy': 'myproxy'}
+ cfg = {"proxy": "myproxy"}
cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
self.assertTrue(os.path.isfile(self.pfile))
@@ -36,7 +37,7 @@ class TestAptProxyConfig(TestCase):
self.assertTrue(self._search_apt_config(contents, "http", "myproxy"))
def test_apt_http_proxy_written(self):
- cfg = {'http_proxy': 'myproxy'}
+ cfg = {"http_proxy": "myproxy"}
cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
self.assertTrue(os.path.isfile(self.pfile))
@@ -46,14 +47,17 @@ class TestAptProxyConfig(TestCase):
self.assertTrue(self._search_apt_config(contents, "http", "myproxy"))
def test_apt_all_proxy_written(self):
- cfg = {'http_proxy': 'myproxy_http_proxy',
- 'https_proxy': 'myproxy_https_proxy',
- 'ftp_proxy': 'myproxy_ftp_proxy'}
-
- values = {'http': cfg['http_proxy'],
- 'https': cfg['https_proxy'],
- 'ftp': cfg['ftp_proxy'],
- }
+ cfg = {
+ "http_proxy": "myproxy_http_proxy",
+ "https_proxy": "myproxy_https_proxy",
+ "ftp_proxy": "myproxy_ftp_proxy",
+ }
+
+ values = {
+ "http": cfg["http_proxy"],
+ "https": cfg["https_proxy"],
+ "ftp": cfg["ftp_proxy"],
+ }
cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
@@ -73,15 +77,16 @@ class TestAptProxyConfig(TestCase):
def test_proxy_replaced(self):
util.write_file(self.cfile, "content doesnt matter")
- cc_apt_configure.apply_apt_config({'proxy': "foo"},
- self.pfile, self.cfile)
+ cc_apt_configure.apply_apt_config(
+ {"proxy": "foo"}, self.pfile, self.cfile
+ )
self.assertTrue(os.path.isfile(self.pfile))
contents = util.load_file(self.pfile)
self.assertTrue(self._search_apt_config(contents, "http", "foo"))
def test_config_written(self):
- payload = 'this is my apt config'
- cfg = {'conf': payload}
+ payload = "this is my apt config"
+ cfg = {"conf": payload}
cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
@@ -92,8 +97,9 @@ class TestAptProxyConfig(TestCase):
def test_config_replaced(self):
util.write_file(self.pfile, "content doesnt matter")
- cc_apt_configure.apply_apt_config({'conf': "foo"},
- self.pfile, self.cfile)
+ cc_apt_configure.apply_apt_config(
+ {"conf": "foo"}, self.pfile, self.cfile
+ )
self.assertTrue(os.path.isfile(self.cfile))
self.assertEqual(util.load_file(self.cfile), "foo")
@@ -109,21 +115,23 @@ class TestConversion(TestCase):
def test_convert_with_apt_mirror_as_empty_string(self):
# an empty apt_mirror is the same as no apt_mirror
empty_m_found = cc_apt_configure.convert_to_v3_apt_format(
- {'apt_mirror': ''})
+ {"apt_mirror": ""}
+ )
default_found = cc_apt_configure.convert_to_v3_apt_format({})
self.assertEqual(default_found, empty_m_found)
def test_convert_with_apt_mirror(self):
- mirror = 'http://my.mirror/ubuntu'
- f = cc_apt_configure.convert_to_v3_apt_format({'apt_mirror': mirror})
- self.assertIn(mirror, set(m['uri'] for m in f['apt']['primary']))
+ mirror = "http://my.mirror/ubuntu"
+ f = cc_apt_configure.convert_to_v3_apt_format({"apt_mirror": mirror})
+ self.assertIn(mirror, set(m["uri"] for m in f["apt"]["primary"]))
def test_no_old_content(self):
- mirror = 'http://my.mirror/ubuntu'
- mydata = {'apt': {'primary': {'arches': ['default'], 'uri': mirror}}}
+ mirror = "http://my.mirror/ubuntu"
+ mydata = {"apt": {"primary": {"arches": ["default"], "uri": mirror}}}
expected = copy.deepcopy(mydata)
- self.assertEqual(expected,
- cc_apt_configure.convert_to_v3_apt_format(mydata))
+ self.assertEqual(
+ expected, cc_apt_configure.convert_to_v3_apt_format(mydata)
+ )
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py b/tests/unittests/config/test_apt_configure_sources_list_v1.py
index 369480be..d4ade106 100644
--- a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py
+++ b/tests/unittests/config/test_apt_configure_sources_list_v1.py
@@ -9,19 +9,11 @@ import shutil
import tempfile
from unittest import mock
-from cloudinit import cloud
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import templater
-from cloudinit import subp
-from cloudinit import util
-
+from cloudinit import subp, templater, util
from cloudinit.config import cc_apt_configure
-from cloudinit.sources import DataSourceNone
-
from cloudinit.distros.debian import Distro
-
-from cloudinit.tests import helpers as t_help
+from tests.unittests import helpers as t_help
+from tests.unittests.util import get_cloud
LOG = logging.getLogger(__name__)
@@ -44,8 +36,7 @@ apt_custom_sources_list: |
# FIND_SOMETHING_SPECIAL
"""
-EXPECTED_CONVERTED_CONTENT = (
- """## Note, this file is written by cloud-init on first boot of an instance
+EXPECTED_CONVERTED_CONTENT = """## Note, this file is written by cloud-init on first boot of an instance
## modifications made here will not survive a re-bundle.
## if you wish to make changes you can:
## a.) add 'apt_preserve_sources_list: true' to /etc/cloud/cloud.cfg
@@ -58,13 +49,14 @@ EXPECTED_CONVERTED_CONTENT = (
deb http://archive.ubuntu.com/ubuntu/ fakerelease main restricted
deb-src http://archive.ubuntu.com/ubuntu/ fakerelease main restricted
# FIND_SOMETHING_SPECIAL
-""")
+"""
class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
"""TestAptSourceConfigSourceList
Main Class to test sources list rendering
"""
+
def setUp(self):
super(TestAptSourceConfigSourceList, self).setUp()
self.subp = subp.subp
@@ -73,23 +65,13 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
rpatcher = mock.patch("cloudinit.util.lsb_release")
get_rel = rpatcher.start()
- get_rel.return_value = {'codename': "fakerelease"}
+ get_rel.return_value = {"codename": "fakerelease"}
self.addCleanup(rpatcher.stop)
apatcher = mock.patch("cloudinit.util.get_dpkg_architecture")
get_arch = apatcher.start()
- get_arch.return_value = 'amd64'
+ get_arch.return_value = "amd64"
self.addCleanup(apatcher.stop)
- def _get_cloud(self, distro, metadata=None):
- self.patchUtils(self.new_root)
- paths = helpers.Paths({})
- cls = distros.fetch(distro)
- mydist = cls(distro, {}, paths)
- myds = DataSourceNone.DataSourceNone({}, mydist, paths)
- if metadata:
- myds.metadata.update(metadata)
- return cloud.Cloud(myds, paths, {}, mydist, None)
-
def apt_source_list(self, distro, mirror, mirrorcheck=None):
"""apt_source_list
Test rendering of a source.list from template for a given distro
@@ -98,47 +80,57 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
mirrorcheck = mirror
if isinstance(mirror, list):
- cfg = {'apt_mirror_search': mirror}
+ cfg = {"apt_mirror_search": mirror}
else:
- cfg = {'apt_mirror': mirror}
+ cfg = {"apt_mirror": mirror}
- mycloud = self._get_cloud(distro)
+ mycloud = get_cloud(distro)
- with mock.patch.object(util, 'write_file') as mockwf:
- with mock.patch.object(util, 'load_file',
- return_value="faketmpl") as mocklf:
- with mock.patch.object(os.path, 'isfile',
- return_value=True) as mockisfile:
+ with mock.patch.object(util, "write_file") as mockwf:
+ with mock.patch.object(
+ util, "load_file", return_value="faketmpl"
+ ) as mocklf:
+ with mock.patch.object(
+ os.path, "isfile", return_value=True
+ ) as mockisfile:
with mock.patch.object(
- templater, 'render_string',
- return_value='fake') as mockrnd:
- with mock.patch.object(util, 'rename'):
- cc_apt_configure.handle("test", cfg, mycloud,
- LOG, None)
+ templater, "render_string", return_value="fake"
+ ) as mockrnd:
+ with mock.patch.object(util, "rename"):
+ cc_apt_configure.handle(
+ "test", cfg, mycloud, LOG, None
+ )
mockisfile.assert_any_call(
- ('/etc/cloud/templates/sources.list.%s.tmpl' % distro))
+ "/etc/cloud/templates/sources.list.%s.tmpl" % distro
+ )
mocklf.assert_any_call(
- ('/etc/cloud/templates/sources.list.%s.tmpl' % distro))
- mockrnd.assert_called_once_with('faketmpl',
- {'RELEASE': 'fakerelease',
- 'PRIMARY': mirrorcheck,
- 'MIRROR': mirrorcheck,
- 'SECURITY': mirrorcheck,
- 'codename': 'fakerelease',
- 'primary': mirrorcheck,
- 'mirror': mirrorcheck,
- 'security': mirrorcheck})
- mockwf.assert_called_once_with('/etc/apt/sources.list', 'fake',
- mode=0o644)
+ "/etc/cloud/templates/sources.list.%s.tmpl" % distro
+ )
+ mockrnd.assert_called_once_with(
+ "faketmpl",
+ {
+ "RELEASE": "fakerelease",
+ "PRIMARY": mirrorcheck,
+ "MIRROR": mirrorcheck,
+ "SECURITY": mirrorcheck,
+ "codename": "fakerelease",
+ "primary": mirrorcheck,
+ "mirror": mirrorcheck,
+ "security": mirrorcheck,
+ },
+ )
+ mockwf.assert_called_once_with(
+ "/etc/apt/sources.list", "fake", mode=0o644
+ )
def test_apt_v1_source_list_debian(self):
"""Test rendering of a source.list from template for debian"""
- self.apt_source_list('debian', 'http://httpredir.debian.org/debian')
+ self.apt_source_list("debian", "http://httpredir.debian.org/debian")
def test_apt_v1_source_list_ubuntu(self):
"""Test rendering of a source.list from template for ubuntu"""
- self.apt_source_list('ubuntu', 'http://archive.ubuntu.com/ubuntu/')
+ self.apt_source_list("ubuntu", "http://archive.ubuntu.com/ubuntu/")
@staticmethod
def myresolve(name):
@@ -152,43 +144,51 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
def test_apt_v1_srcl_debian_mirrorfail(self):
"""Test rendering of a source.list from template for debian"""
- with mock.patch.object(util, 'is_resolvable',
- side_effect=self.myresolve) as mockresolve:
- self.apt_source_list('debian',
- ['http://does.not.exist',
- 'http://httpredir.debian.org/debian'],
- 'http://httpredir.debian.org/debian')
+ with mock.patch.object(
+ util, "is_resolvable", side_effect=self.myresolve
+ ) as mockresolve:
+ self.apt_source_list(
+ "debian",
+ [
+ "http://does.not.exist",
+ "http://httpredir.debian.org/debian",
+ ],
+ "http://httpredir.debian.org/debian",
+ )
mockresolve.assert_any_call("does.not.exist")
mockresolve.assert_any_call("httpredir.debian.org")
def test_apt_v1_srcl_ubuntu_mirrorfail(self):
"""Test rendering of a source.list from template for ubuntu"""
- with mock.patch.object(util, 'is_resolvable',
- side_effect=self.myresolve) as mockresolve:
- self.apt_source_list('ubuntu',
- ['http://does.not.exist',
- 'http://archive.ubuntu.com/ubuntu/'],
- 'http://archive.ubuntu.com/ubuntu/')
+ with mock.patch.object(
+ util, "is_resolvable", side_effect=self.myresolve
+ ) as mockresolve:
+ self.apt_source_list(
+ "ubuntu",
+ ["http://does.not.exist", "http://archive.ubuntu.com/ubuntu/"],
+ "http://archive.ubuntu.com/ubuntu/",
+ )
mockresolve.assert_any_call("does.not.exist")
mockresolve.assert_any_call("archive.ubuntu.com")
def test_apt_v1_srcl_custom(self):
"""Test rendering from a custom source.list template"""
cfg = util.load_yaml(YAML_TEXT_CUSTOM_SL)
- mycloud = self._get_cloud('ubuntu')
+ mycloud = get_cloud()
# the second mock restores the original subp
- with mock.patch.object(util, 'write_file') as mockwrite:
- with mock.patch.object(subp, 'subp', self.subp):
- with mock.patch.object(Distro, 'get_primary_arch',
- return_value='amd64'):
- cc_apt_configure.handle("notimportant", cfg, mycloud,
- LOG, None)
+ with mock.patch.object(util, "write_file") as mockwrite:
+ with mock.patch.object(subp, "subp", self.subp):
+ with mock.patch.object(
+ Distro, "get_primary_arch", return_value="amd64"
+ ):
+ cc_apt_configure.handle(
+ "notimportant", cfg, mycloud, LOG, None
+ )
mockwrite.assert_called_once_with(
- '/etc/apt/sources.list',
- EXPECTED_CONVERTED_CONTENT,
- mode=420)
+ "/etc/apt/sources.list", EXPECTED_CONVERTED_CONTENT, mode=420
+ )
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py b/tests/unittests/config/test_apt_configure_sources_list_v3.py
index b96fd4d4..d9ec6f74 100644
--- a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py
+++ b/tests/unittests/config/test_apt_configure_sources_list_v3.py
@@ -7,21 +7,15 @@ import logging
import os
import shutil
import tempfile
+from contextlib import ExitStack
from unittest import mock
from unittest.mock import call
-from cloudinit import cloud
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import subp
-from cloudinit import util
-
+from cloudinit import subp, util
from cloudinit.config import cc_apt_configure
-from cloudinit.sources import DataSourceNone
-
from cloudinit.distros.debian import Distro
-
-from cloudinit.tests import helpers as t_help
+from tests.unittests import helpers as t_help
+from tests.unittests.util import get_cloud
LOG = logging.getLogger(__name__)
@@ -69,30 +63,31 @@ deb http://test.ubuntu.com/ubuntu/ notouched-updates main restricted
deb http://testsec.ubuntu.com/ubuntu/ notouched-security main restricted
"""
-EXPECTED_BASE_CONTENT = ("""
+EXPECTED_BASE_CONTENT = """
deb http://test.ubuntu.com/ubuntu/ notouched main restricted
deb-src http://test.ubuntu.com/ubuntu/ notouched main restricted
deb http://test.ubuntu.com/ubuntu/ notouched-updates main restricted
deb http://testsec.ubuntu.com/ubuntu/ notouched-security main restricted
-""")
+"""
-EXPECTED_MIRROR_CONTENT = ("""
+EXPECTED_MIRROR_CONTENT = """
deb http://test.ubuntu.com/ubuntu/ notouched main restricted
deb-src http://test.ubuntu.com/ubuntu/ notouched main restricted
deb http://test.ubuntu.com/ubuntu/ notouched-updates main restricted
deb http://test.ubuntu.com/ubuntu/ notouched-security main restricted
-""")
+"""
-EXPECTED_PRIMSEC_CONTENT = ("""
+EXPECTED_PRIMSEC_CONTENT = """
deb http://test.ubuntu.com/ubuntu/ notouched main restricted
deb-src http://test.ubuntu.com/ubuntu/ notouched main restricted
deb http://test.ubuntu.com/ubuntu/ notouched-updates main restricted
deb http://testsec.ubuntu.com/ubuntu/ notouched-security main restricted
-""")
+"""
class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
"""TestAptSourceConfigSourceList - Class to test sources list rendering"""
+
def setUp(self):
super(TestAptSourceConfigSourceList, self).setUp()
self.subp = subp.subp
@@ -101,57 +96,60 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
rpatcher = mock.patch("cloudinit.util.lsb_release")
get_rel = rpatcher.start()
- get_rel.return_value = {'codename': "fakerel"}
+ get_rel.return_value = {"codename": "fakerel"}
self.addCleanup(rpatcher.stop)
apatcher = mock.patch("cloudinit.util.get_dpkg_architecture")
get_arch = apatcher.start()
- get_arch.return_value = 'amd64'
+ get_arch.return_value = "amd64"
self.addCleanup(apatcher.stop)
- def _get_cloud(self, distro, metadata=None):
- self.patchUtils(self.new_root)
- paths = helpers.Paths({})
- cls = distros.fetch(distro)
- mydist = cls(distro, {}, paths)
- myds = DataSourceNone.DataSourceNone({}, mydist, paths)
- if metadata:
- myds.metadata.update(metadata)
- return cloud.Cloud(myds, paths, {}, mydist, None)
-
def _apt_source_list(self, distro, cfg, cfg_on_empty=False):
"""_apt_source_list - Test rendering from template (generic)"""
# entry at top level now, wrap in 'apt' key
- cfg = {'apt': cfg}
- mycloud = self._get_cloud(distro)
-
- with mock.patch.object(util, 'write_file') as mock_writefile:
- with mock.patch.object(util, 'load_file',
- return_value=MOCKED_APT_SRC_LIST
- ) as mock_loadfile:
- with mock.patch.object(os.path, 'isfile',
- return_value=True) as mock_isfile:
- cfg_func = ('cloudinit.config.cc_apt_configure.' +
- '_should_configure_on_empty_apt')
- with mock.patch(cfg_func,
- return_value=(cfg_on_empty, "test")
- ) as mock_shouldcfg:
- cc_apt_configure.handle("test", cfg, mycloud, LOG,
- None)
-
- return mock_writefile, mock_loadfile, mock_isfile, mock_shouldcfg
+ cfg = {"apt": cfg}
+ mycloud = get_cloud(distro)
+
+ with ExitStack() as stack:
+ mock_writefile = stack.enter_context(
+ mock.patch.object(util, "write_file")
+ )
+ mock_loadfile = stack.enter_context(
+ mock.patch.object(
+ util, "load_file", return_value=MOCKED_APT_SRC_LIST
+ )
+ )
+ mock_isfile = stack.enter_context(
+ mock.patch.object(os.path, "isfile", return_value=True)
+ )
+ stack.enter_context(mock.patch.object(util, "del_file"))
+ cfg_func = (
+ "cloudinit.config.cc_apt_configure."
+ "_should_configure_on_empty_apt"
+ )
+ mock_shouldcfg = stack.enter_context(
+ mock.patch(cfg_func, return_value=(cfg_on_empty, "test"))
+ )
+ cc_apt_configure.handle("test", cfg, mycloud, LOG, None)
+
+ return mock_writefile, mock_loadfile, mock_isfile, mock_shouldcfg
def test_apt_v3_source_list_debian(self):
"""test_apt_v3_source_list_debian - without custom sources or parms"""
cfg = {}
- distro = 'debian'
+ distro = "debian"
expected = EXPECTED_BASE_CONTENT
- mock_writefile, mock_load_file, mock_isfile, mock_shouldcfg = (
- self._apt_source_list(distro, cfg, cfg_on_empty=True))
-
- template = '/etc/cloud/templates/sources.list.%s.tmpl' % distro
- mock_writefile.assert_called_once_with('/etc/apt/sources.list',
- expected, mode=0o644)
+ (
+ mock_writefile,
+ mock_load_file,
+ mock_isfile,
+ mock_shouldcfg,
+ ) = self._apt_source_list(distro, cfg, cfg_on_empty=True)
+
+ template = "/etc/cloud/templates/sources.list.%s.tmpl" % distro
+ mock_writefile.assert_called_once_with(
+ "/etc/apt/sources.list", expected, mode=0o644
+ )
mock_load_file.assert_called_with(template)
mock_isfile.assert_any_call(template)
self.assertEqual(1, mock_shouldcfg.call_count)
@@ -159,15 +157,20 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
def test_apt_v3_source_list_ubuntu(self):
"""test_apt_v3_source_list_ubuntu - without custom sources or parms"""
cfg = {}
- distro = 'ubuntu'
+ distro = "ubuntu"
expected = EXPECTED_BASE_CONTENT
- mock_writefile, mock_load_file, mock_isfile, mock_shouldcfg = (
- self._apt_source_list(distro, cfg, cfg_on_empty=True))
-
- template = '/etc/cloud/templates/sources.list.%s.tmpl' % distro
- mock_writefile.assert_called_once_with('/etc/apt/sources.list',
- expected, mode=0o644)
+ (
+ mock_writefile,
+ mock_load_file,
+ mock_isfile,
+ mock_shouldcfg,
+ ) = self._apt_source_list(distro, cfg, cfg_on_empty=True)
+
+ template = "/etc/cloud/templates/sources.list.%s.tmpl" % distro
+ mock_writefile.assert_called_once_with(
+ "/etc/apt/sources.list", expected, mode=0o644
+ )
mock_load_file.assert_called_with(template)
mock_isfile.assert_any_call(template)
self.assertEqual(1, mock_shouldcfg.call_count)
@@ -175,12 +178,13 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
def test_apt_v3_source_list_ubuntu_snappy(self):
"""test_apt_v3_source_list_ubuntu_snappy - without custom sources or
parms"""
- cfg = {'apt': {}}
- mycloud = self._get_cloud('ubuntu')
+ cfg = {"apt": {}}
+ mycloud = get_cloud()
- with mock.patch.object(util, 'write_file') as mock_writefile:
- with mock.patch.object(util, 'system_is_snappy',
- return_value=True) as mock_issnappy:
+ with mock.patch.object(util, "write_file") as mock_writefile:
+ with mock.patch.object(
+ util, "system_is_snappy", return_value=True
+ ) as mock_issnappy:
cc_apt_configure.handle("test", cfg, mycloud, LOG, None)
self.assertEqual(0, mock_writefile.call_count)
@@ -189,7 +193,7 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
def test_apt_v3_source_list_centos(self):
"""test_apt_v3_source_list_centos - without custom sources or parms"""
cfg = {}
- distro = 'rhel'
+ distro = "rhel"
mock_writefile, _, _, _ = self._apt_source_list(distro, cfg)
@@ -197,41 +201,47 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
def test_apt_v3_source_list_psm(self):
"""test_apt_v3_source_list_psm - Test specifying prim+sec mirrors"""
- pm = 'http://test.ubuntu.com/ubuntu/'
- sm = 'http://testsec.ubuntu.com/ubuntu/'
- cfg = {'preserve_sources_list': False,
- 'primary': [{'arches': ["default"],
- 'uri': pm}],
- 'security': [{'arches': ["default"],
- 'uri': sm}]}
- distro = 'ubuntu'
+ pm = "http://test.ubuntu.com/ubuntu/"
+ sm = "http://testsec.ubuntu.com/ubuntu/"
+ cfg = {
+ "preserve_sources_list": False,
+ "primary": [{"arches": ["default"], "uri": pm}],
+ "security": [{"arches": ["default"], "uri": sm}],
+ }
+ distro = "ubuntu"
expected = EXPECTED_PRIMSEC_CONTENT
- mock_writefile, mock_load_file, mock_isfile, _ = (
- self._apt_source_list(distro, cfg, cfg_on_empty=True))
+ mock_writefile, mock_load_file, mock_isfile, _ = self._apt_source_list(
+ distro, cfg, cfg_on_empty=True
+ )
- template = '/etc/cloud/templates/sources.list.%s.tmpl' % distro
- mock_writefile.assert_called_once_with('/etc/apt/sources.list',
- expected, mode=0o644)
+ template = "/etc/cloud/templates/sources.list.%s.tmpl" % distro
+ mock_writefile.assert_called_once_with(
+ "/etc/apt/sources.list", expected, mode=0o644
+ )
mock_load_file.assert_called_with(template)
mock_isfile.assert_any_call(template)
def test_apt_v3_srcl_custom(self):
"""test_apt_v3_srcl_custom - Test rendering a custom source template"""
cfg = util.load_yaml(YAML_TEXT_CUSTOM_SL)
- mycloud = self._get_cloud('ubuntu')
+ mycloud = get_cloud()
# the second mock restores the original subp
- with mock.patch.object(util, 'write_file') as mockwrite:
- with mock.patch.object(subp, 'subp', self.subp):
- with mock.patch.object(Distro, 'get_primary_arch',
- return_value='amd64'):
- cc_apt_configure.handle("notimportant", cfg, mycloud,
- LOG, None)
-
- calls = [call('/etc/apt/sources.list',
- EXPECTED_CONVERTED_CONTENT,
- mode=0o644)]
+ with mock.patch.object(util, "write_file") as mockwrite:
+ with mock.patch.object(subp, "subp", self.subp):
+ with mock.patch.object(
+ Distro, "get_primary_arch", return_value="amd64"
+ ):
+ cc_apt_configure.handle(
+ "notimportant", cfg, mycloud, LOG, None
+ )
+
+ calls = [
+ call(
+ "/etc/apt/sources.list", EXPECTED_CONVERTED_CONTENT, mode=0o644
+ )
+ ]
mockwrite.assert_has_calls(calls)
diff --git a/tests/unittests/config/test_apt_key.py b/tests/unittests/config/test_apt_key.py
new file mode 100644
index 00000000..9fcf3039
--- /dev/null
+++ b/tests/unittests/config/test_apt_key.py
@@ -0,0 +1,124 @@
+import os
+from unittest import mock
+
+from cloudinit import subp, util
+from cloudinit.config import cc_apt_configure
+
+TEST_KEY_HUMAN = """
+/etc/apt/cloud-init.gpg.d/my_key.gpg
+--------------------------------------------
+pub rsa4096 2021-10-22 [SC]
+ 3A3E F34D FDED B3B7 F3FD F603 F83F 7712 9A5E BD85
+uid [ unknown] Brett Holman <brett.holman@canonical.com>
+sub rsa4096 2021-10-22 [A]
+sub rsa4096 2021-10-22 [E]
+"""
+
+TEST_KEY_MACHINE = """
+tru::1:1635129362:0:3:1:5
+pub:-:4096:1:F83F77129A5EBD85:1634912922:::-:::scESCA::::::23::0:
+fpr:::::::::3A3EF34DFDEDB3B7F3FDF603F83F77129A5EBD85:
+uid:-::::1634912922::64F1F1D6FA96316752D635D7C6406C52C40713C7::Brett Holman \
+<brett.holman@canonical.com>::::::::::0:
+sub:-:4096:1:544B39C9A9141F04:1634912922::::::a::::::23:
+fpr:::::::::8BD901490D6EC986D03D6F0D544B39C9A9141F04:
+sub:-:4096:1:F45D9443F0A87092:1634912922::::::e::::::23:
+fpr:::::::::8CCCB332317324F030A45B19F45D9443F0A87092:
+"""
+
+TEST_KEY_FINGERPRINT_HUMAN = (
+ "3A3E F34D FDED B3B7 F3FD F603 F83F 7712 9A5E BD85"
+)
+
+TEST_KEY_FINGERPRINT_MACHINE = "3A3EF34DFDEDB3B7F3FDF603F83F77129A5EBD85"
+
+
+class TestAptKey:
+ """TestAptKey
+ Class to test apt-key commands
+ """
+
+ @mock.patch.object(subp, "subp", return_value=("fakekey", ""))
+ @mock.patch.object(util, "write_file")
+ def _apt_key_add_success_helper(self, directory, *args, hardened=False):
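+ # The decorators patch subp.subp and util.write_file; the mocks land in *args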
+ file = cc_apt_configure.apt_key(
+ "add", output_file="my-key", data="fakekey", hardened=hardened
+ )
+ assert file == directory + "/my-key.gpg"
+
+ def test_apt_key_add_success(self):
+ """Verify the right directory path gets returned for unhardened case"""
+ self._apt_key_add_success_helper("/etc/apt/trusted.gpg.d")
+
+ def test_apt_key_add_success_hardened(self):
+ """Verify the right directory path gets returned for hardened case"""
+ self._apt_key_add_success_helper(
+ "/etc/apt/cloud-init.gpg.d", hardened=True
+ )
+
+ def test_apt_key_add_fail_no_file_name(self):
+ """Verify that null filename gets handled correctly"""
+ file = cc_apt_configure.apt_key("add", output_file=None, data="")
+ assert "/dev/null" == file
+
+ def _apt_key_fail_helper(self):
+ file = cc_apt_configure.apt_key(
+ "add", output_file="my-key", data="fakekey"
+ )
+ assert file == "/dev/null"
+
+ @mock.patch.object(subp, "subp", side_effect=subp.ProcessExecutionError)
+ def test_apt_key_add_fail_no_file_name_subproc(self, *args):
+ """Verify that bad key value gets handled correctly"""
+ self._apt_key_fail_helper()
+
+ @mock.patch.object(
+ subp, "subp", side_effect=UnicodeDecodeError("test", b"", 1, 1, "")
+ )
+ def test_apt_key_add_fail_no_file_name_unicode(self, *args):
+ """Verify that bad key encoding gets handled correctly"""
+ self._apt_key_fail_helper()
+
+ def _apt_key_list_success_helper(self, finger, key, human_output=True):
+ @mock.patch.object(os, "listdir", return_value=("/fake/dir/key.gpg",))
+ @mock.patch.object(subp, "subp", return_value=(key, ""))
+ def mocked_list(*a):
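+ # Nested so the listdir/subp patches only apply while this helper runs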
+
+ keys = cc_apt_configure.apt_key("list", human_output)
+ assert finger in keys
+
+ mocked_list()
+
+ def test_apt_key_list_success_human(self):
+ """Verify expected key output, human"""
+ self._apt_key_list_success_helper(
+ TEST_KEY_FINGERPRINT_HUMAN, TEST_KEY_HUMAN
+ )
+
+ def test_apt_key_list_success_machine(self):
+ """Verify expected key output, machine"""
+ self._apt_key_list_success_helper(
+ TEST_KEY_FINGERPRINT_MACHINE, TEST_KEY_MACHINE, human_output=False
+ )
+
+ @mock.patch.object(os, "listdir", return_value=())
+ @mock.patch.object(subp, "subp", return_value=("", ""))
+ def test_apt_key_list_fail_no_keys(self, *args):
+ """Ensure falsy output for no keys"""
+ keys = cc_apt_configure.apt_key("list")
+ assert not keys
+
+ @mock.patch.object(os, "listdir", return_value="file_not_gpg_key.txt")
+ @mock.patch.object(subp, "subp", return_value=("", ""))
+ def test_apt_key_list_fail_no_keys_file(self, *args):
+ """Ensure non-gpg file is not returned.
+
+ apt-key used file extensions for this, so we do too
+ """
+ assert not cc_apt_configure.apt_key("list")
+
+ @mock.patch.object(subp, "subp", side_effect=subp.ProcessExecutionError)
+ @mock.patch.object(os, "listdir", return_value="bad_gpg_key.gpg")
+ def test_apt_key_list_fail_bad_key_file(self, *args):
+ """Ensure bad gpg key doesn't throw exeption."""
+ assert not cc_apt_configure.apt_key("list")
diff --git a/tests/unittests/config/test_apt_source_v1.py b/tests/unittests/config/test_apt_source_v1.py
new file mode 100644
index 00000000..fbc2bf45
--- /dev/null
+++ b/tests/unittests/config/test_apt_source_v1.py
@@ -0,0 +1,852 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+""" test_handler_apt_source_v1
+Testing various config variations of the apt_source config
+This calls all things with v1 format to stress the conversion code on top of
+the actually tested code.
+"""
+import os
+import pathlib
+import re
+import shutil
+import tempfile
+from unittest import mock
+from unittest.mock import call
+
+from cloudinit import gpg, subp, util
+from cloudinit.config import cc_apt_configure
+from tests.unittests.helpers import TestCase
+
+EXPECTEDKEY = """-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1
+
+mI0ESuZLUgEEAKkqq3idtFP7g9hzOu1a8+v8ImawQN4TrvlygfScMU1TIS1eC7UQ
+NUA8Qqgr9iUaGnejb0VciqftLrU9D6WYHSKz+EITefgdyJ6SoQxjoJdsCpJ7o9Jy
+8PQnpRttiFm4qHu6BVnKnBNxw/z3ST9YMqW5kbMQpfxbGe+obRox59NpABEBAAG0
+HUxhdW5jaHBhZCBQUEEgZm9yIFNjb3R0IE1vc2VyiLYEEwECACAFAkrmS1ICGwMG
+CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRAGILvPA2g/d3aEA/9tVjc10HOZwV29
+OatVuTeERjjrIbxflO586GLA8cp0C9RQCwgod/R+cKYdQcHjbqVcP0HqxveLg0RZ
+FJpWLmWKamwkABErwQLGlM/Hwhjfade8VvEQutH5/0JgKHmzRsoqfR+LMO6OS+Sm
+S0ORP6HXET3+jC8BMG4tBWCTK/XEZw==
+=ACB2
+-----END PGP PUBLIC KEY BLOCK-----"""
+
+ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
+
+
+class FakeDistro(object):
+ """Fake Distro helper object"""
+
+ def update_package_sources(self):
+ """Fake update_package_sources helper method"""
+ return
+
+
+class FakeDatasource:
+ """Fake Datasource helper object"""
+
+ def __init__(self):
+ self.region = "region"
+
+
+class FakeCloud(object):
+ """Fake Cloud helper object"""
+
+ def __init__(self):
+ self.distro = FakeDistro()
+ self.datasource = FakeDatasource()
+
+
+class TestAptSourceConfig(TestCase):
+ """TestAptSourceConfig
+ Main Class to test apt_source configs
+ """
+
+ release = "fantastic"
+
+ def setUp(self):
+ super(TestAptSourceConfig, self).setUp()
+ self.tmp = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, self.tmp)
+ self.aptlistfile = os.path.join(self.tmp, "single-deb.list")
+ self.aptlistfile2 = os.path.join(self.tmp, "single-deb2.list")
+ self.aptlistfile3 = os.path.join(self.tmp, "single-deb3.list")
+ self.join = os.path.join
+ self.matcher = re.compile(ADD_APT_REPO_MATCH).search
+ # mock fallback filename into writable tmp dir
+ self.fallbackfn = os.path.join(
+ self.tmp, "etc/apt/sources.list.d/", "cloud_config_sources.list"
+ )
+
+ self.fakecloud = FakeCloud()
+
+ rpatcher = mock.patch("cloudinit.util.lsb_release")
+ get_rel = rpatcher.start()
+ get_rel.return_value = {"codename": self.release}
+ self.addCleanup(rpatcher.stop)
+ apatcher = mock.patch("cloudinit.util.get_dpkg_architecture")
+ get_arch = apatcher.start()
+ get_arch.return_value = "amd64"
+ self.addCleanup(apatcher.stop)
+
+ def _get_default_params(self):
+ """get_default_params
+ Get the most basic default mirror and release info to be used in tests
+ """
+ params = {}
+ params["RELEASE"] = self.release
+ params["MIRROR"] = "http://archive.ubuntu.com/ubuntu"
+ return params
+
+ def wrapv1conf(self, cfg):
+ params = self._get_default_params()
+ # old v1 list format under old keys, but callable by the main handler
+ # disable source.list rendering and set mirror to avoid other code
+ return {
+ "apt_preserve_sources_list": True,
+ "apt_mirror": params["MIRROR"],
+ "apt_sources": cfg,
+ }
+
+ def myjoin(self, *args, **kwargs):
+ """myjoin - redir into writable tmpdir"""
+ if (
+ args[0] == "/etc/apt/sources.list.d/"
+ and args[1] == "cloud_config_sources.list"
+ and len(args) == 2
+ ):
+ return self.join(self.tmp, args[0].lstrip("/"), args[1])
+ else:
+ return self.join(*args, **kwargs)
+
+ def apt_src_basic(self, filename, cfg):
+ """apt_src_basic
+ Test a fixed deb source string; has to overwrite the mirror conf in params
+ """
+ cfg = self.wrapv1conf(cfg)
+
+ cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
+
+ self.assertTrue(os.path.isfile(filename))
+
+ contents = util.load_file(filename)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % (
+ "deb",
+ "http://archive.ubuntu.com/ubuntu",
+ "karmic-backports",
+ "main universe multiverse restricted",
+ ),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+
+ def test_apt_src_basic(self):
+ """Test deb source string, overwrite mirror and filename"""
+ cfg = {
+ "source": (
+ "deb http://archive.ubuntu.com/ubuntu"
+ " karmic-backports"
+ " main universe multiverse restricted"
+ ),
+ "filename": self.aptlistfile,
+ }
+ self.apt_src_basic(self.aptlistfile, [cfg])
+
+ def test_apt_src_basic_dict(self):
+ """Test deb source string, overwrite mirror and filename (dict)"""
+ cfg = {
+ self.aptlistfile: {
+ "source": (
+ "deb http://archive.ubuntu.com/ubuntu"
+ " karmic-backports"
+ " main universe multiverse restricted"
+ )
+ }
+ }
+ self.apt_src_basic(self.aptlistfile, cfg)
+
+ def apt_src_basic_tri(self, cfg):
+ """apt_src_basic_tri
+ Test three fixed deb source strings; has to overwrite the mirror conf
+ in params. Test with filenames provided in the config.
+ Generic part that checks three files with different content.
+ """
+ self.apt_src_basic(self.aptlistfile, cfg)
+
+ # extra verify on two extra files of this test
+ contents = util.load_file(self.aptlistfile2)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % (
+ "deb",
+ "http://archive.ubuntu.com/ubuntu",
+ "precise-backports",
+ "main universe multiverse restricted",
+ ),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+ contents = util.load_file(self.aptlistfile3)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % (
+ "deb",
+ "http://archive.ubuntu.com/ubuntu",
+ "lucid-backports",
+ "main universe multiverse restricted",
+ ),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+
+ def test_apt_src_basic_tri(self):
+ """Test Fix three deb source string with filenames"""
+ cfg1 = {
+ "source": (
+ "deb http://archive.ubuntu.com/ubuntu"
+ " karmic-backports"
+ " main universe multiverse restricted"
+ ),
+ "filename": self.aptlistfile,
+ }
+ cfg2 = {
+ "source": (
+ "deb http://archive.ubuntu.com/ubuntu"
+ " precise-backports"
+ " main universe multiverse restricted"
+ ),
+ "filename": self.aptlistfile2,
+ }
+ cfg3 = {
+ "source": (
+ "deb http://archive.ubuntu.com/ubuntu"
+ " lucid-backports"
+ " main universe multiverse restricted"
+ ),
+ "filename": self.aptlistfile3,
+ }
+ self.apt_src_basic_tri([cfg1, cfg2, cfg3])
+
+ def test_apt_src_basic_dict_tri(self):
+ """Test Fix three deb source string with filenames (dict)"""
+ cfg = {
+ self.aptlistfile: {
+ "source": (
+ "deb http://archive.ubuntu.com/ubuntu"
+ " karmic-backports"
+ " main universe multiverse restricted"
+ )
+ },
+ self.aptlistfile2: {
+ "source": (
+ "deb http://archive.ubuntu.com/ubuntu"
+ " precise-backports"
+ " main universe multiverse restricted"
+ )
+ },
+ self.aptlistfile3: {
+ "source": (
+ "deb http://archive.ubuntu.com/ubuntu"
+ " lucid-backports"
+ " main universe multiverse restricted"
+ )
+ },
+ }
+ self.apt_src_basic_tri(cfg)
+
+ def test_apt_src_basic_nofn(self):
+ """Test Fix three deb source string without filenames (dict)"""
+ cfg = {
+ "source": (
+ "deb http://archive.ubuntu.com/ubuntu"
+ " karmic-backports"
+ " main universe multiverse restricted"
+ )
+ }
+ with mock.patch.object(os.path, "join", side_effect=self.myjoin):
+ self.apt_src_basic(self.fallbackfn, [cfg])
+
+ def apt_src_replacement(self, filename, cfg):
+ """apt_src_replace
+ Test Autoreplacement of MIRROR and RELEASE in source specs
+ """
+ cfg = self.wrapv1conf(cfg)
+ params = self._get_default_params()
+ cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
+
+ self.assertTrue(os.path.isfile(filename))
+
+ contents = util.load_file(filename)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % ("deb", params["MIRROR"], params["RELEASE"], "multiverse"),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+
+ def test_apt_src_replace(self):
+ """Test Autoreplacement of MIRROR and RELEASE in source specs"""
+ cfg = {
+ "source": "deb $MIRROR $RELEASE multiverse",
+ "filename": self.aptlistfile,
+ }
+ self.apt_src_replacement(self.aptlistfile, [cfg])
+
+ def apt_src_replace_tri(self, cfg):
+ """apt_src_replace_tri
+ Test three autoreplacements of MIRROR and RELEASE in source specs with
+ generic part
+ """
+ self.apt_src_replacement(self.aptlistfile, cfg)
+
+ # extra verify on two extra files of this test
+ params = self._get_default_params()
+ contents = util.load_file(self.aptlistfile2)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % ("deb", params["MIRROR"], params["RELEASE"], "main"),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+ contents = util.load_file(self.aptlistfile3)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % ("deb", params["MIRROR"], params["RELEASE"], "universe"),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+
+ def test_apt_src_replace_tri(self):
+ """Test triple Autoreplacement of MIRROR and RELEASE in source specs"""
+ cfg1 = {
+ "source": "deb $MIRROR $RELEASE multiverse",
+ "filename": self.aptlistfile,
+ }
+ cfg2 = {
+ "source": "deb $MIRROR $RELEASE main",
+ "filename": self.aptlistfile2,
+ }
+ cfg3 = {
+ "source": "deb $MIRROR $RELEASE universe",
+ "filename": self.aptlistfile3,
+ }
+ self.apt_src_replace_tri([cfg1, cfg2, cfg3])
+
+ def test_apt_src_replace_dict_tri(self):
+ """Test triple Autoreplacement in source specs (dict)"""
+ cfg = {
+ self.aptlistfile: {"source": "deb $MIRROR $RELEASE multiverse"},
+ "notused": {
+ "source": "deb $MIRROR $RELEASE main",
+ "filename": self.aptlistfile2,
+ },
+ self.aptlistfile3: {"source": "deb $MIRROR $RELEASE universe"},
+ }
+ self.apt_src_replace_tri(cfg)
+
+ def test_apt_src_replace_nofn(self):
+ """Test Autoreplacement of MIRROR and RELEASE in source specs nofile"""
+ cfg = {"source": "deb $MIRROR $RELEASE multiverse"}
+ with mock.patch.object(os.path, "join", side_effect=self.myjoin):
+ self.apt_src_replacement(self.fallbackfn, [cfg])
+
+ def apt_src_keyid(self, filename, cfg, keynum):
+ """apt_src_keyid
+ Test specification of a source + keyid
+ """
+ cfg = self.wrapv1conf(cfg)
+
+ with mock.patch.object(cc_apt_configure, "add_apt_key") as mockobj:
+ cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
+
+ # check if it added the right number of keys
+ calls = []
+ sources = cfg["apt"]["sources"]
+ for src in sources:
+ print(sources[src])
+ calls.append(call(sources[src], None))
+
+ mockobj.assert_has_calls(calls, any_order=True)
+
+ self.assertTrue(os.path.isfile(filename))
+
+ contents = util.load_file(filename)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % (
+ "deb",
+ "http://ppa.launchpad.net/smoser/cloud-init-test/ubuntu",
+ "xenial",
+ "main",
+ ),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+
+ def test_apt_src_keyid(self):
+ """Test specification of a source + keyid with filename being set"""
+ cfg = {
+ "source": (
+ "deb "
+ "http://ppa.launchpad.net/"
+ "smoser/cloud-init-test/ubuntu"
+ " xenial main"
+ ),
+ "keyid": "03683F77",
+ "filename": self.aptlistfile,
+ }
+ self.apt_src_keyid(self.aptlistfile, [cfg], 1)
+
+ def test_apt_src_keyid_tri(self):
+ """Test 3x specification of a source + keyid with filename being set"""
+ cfg1 = {
+ "source": (
+ "deb "
+ "http://ppa.launchpad.net/"
+ "smoser/cloud-init-test/ubuntu"
+ " xenial main"
+ ),
+ "keyid": "03683F77",
+ "filename": self.aptlistfile,
+ }
+ cfg2 = {
+ "source": (
+ "deb "
+ "http://ppa.launchpad.net/"
+ "smoser/cloud-init-test/ubuntu"
+ " xenial universe"
+ ),
+ "keyid": "03683F77",
+ "filename": self.aptlistfile2,
+ }
+ cfg3 = {
+ "source": (
+ "deb "
+ "http://ppa.launchpad.net/"
+ "smoser/cloud-init-test/ubuntu"
+ " xenial multiverse"
+ ),
+ "keyid": "03683F77",
+ "filename": self.aptlistfile3,
+ }
+
+ self.apt_src_keyid(self.aptlistfile, [cfg1, cfg2, cfg3], 3)
+ contents = util.load_file(self.aptlistfile2)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % (
+ "deb",
+ "http://ppa.launchpad.net/smoser/cloud-init-test/ubuntu",
+ "xenial",
+ "universe",
+ ),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+ contents = util.load_file(self.aptlistfile3)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % (
+ "deb",
+ "http://ppa.launchpad.net/smoser/cloud-init-test/ubuntu",
+ "xenial",
+ "multiverse",
+ ),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+
+ def test_apt_src_keyid_nofn(self):
+ """Test specification of a source + keyid without filename being set"""
+ cfg = {
+ "source": (
+ "deb "
+ "http://ppa.launchpad.net/"
+ "smoser/cloud-init-test/ubuntu"
+ " xenial main"
+ ),
+ "keyid": "03683F77",
+ }
+ with mock.patch.object(os.path, "join", side_effect=self.myjoin):
+ self.apt_src_keyid(self.fallbackfn, [cfg], 1)
+
+ def apt_src_key(self, filename, cfg):
+ """apt_src_key
+ Test specification of a source + key
+ """
+ cfg = self.wrapv1conf([cfg])
+
+ with mock.patch.object(cc_apt_configure, "add_apt_key") as mockobj:
+ cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
+
+        # check if it added the right number of keys
+ sources = cfg["apt"]["sources"]
+ calls = []
+ for src in sources:
+ print(sources[src])
+ calls.append(call(sources[src], None))
+
+ mockobj.assert_has_calls(calls, any_order=True)
+
+ self.assertTrue(os.path.isfile(filename))
+
+ contents = util.load_file(filename)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % (
+ "deb",
+ "http://ppa.launchpad.net/smoser/cloud-init-test/ubuntu",
+ "xenial",
+ "main",
+ ),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+
+ def test_apt_src_key(self):
+ """Test specification of a source + key with filename being set"""
+ cfg = {
+ "source": (
+ "deb "
+ "http://ppa.launchpad.net/"
+ "smoser/cloud-init-test/ubuntu"
+ " xenial main"
+ ),
+ "key": "fakekey 4321",
+ "filename": self.aptlistfile,
+ }
+ self.apt_src_key(self.aptlistfile, cfg)
+
+ def test_apt_src_key_nofn(self):
+ """Test specification of a source + key without filename being set"""
+ cfg = {
+ "source": (
+ "deb "
+ "http://ppa.launchpad.net/"
+ "smoser/cloud-init-test/ubuntu"
+ " xenial main"
+ ),
+ "key": "fakekey 4321",
+ }
+ with mock.patch.object(os.path, "join", side_effect=self.myjoin):
+ self.apt_src_key(self.fallbackfn, cfg)
+
+ def test_apt_src_keyonly(self):
+ """Test specifying key without source"""
+ cfg = {"key": "fakekey 4242", "filename": self.aptlistfile}
+ cfg = self.wrapv1conf([cfg])
+ with mock.patch.object(cc_apt_configure, "apt_key") as mockobj:
+ cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
+
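+        # apt_key should be invoked with the list file's stem (its basename
+        # without the extension) as the key's output file name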
+ calls = (
+ call(
+ "add",
+ output_file=pathlib.Path(self.aptlistfile).stem,
+ data="fakekey 4242",
+ hardened=False,
+ ),
+ )
+ mockobj.assert_has_calls(calls, any_order=True)
+
+ # filename should be ignored on key only
+ self.assertFalse(os.path.isfile(self.aptlistfile))
+
+ def test_apt_src_keyidonly(self):
+ """Test specification of a keyid without source"""
+ cfg = {"keyid": "03683F77", "filename": self.aptlistfile}
+ cfg = self.wrapv1conf([cfg])
+
+ with mock.patch.object(
+ subp, "subp", return_value=("fakekey 1212", "")
+ ):
+ with mock.patch.object(cc_apt_configure, "apt_key") as mockobj:
+ cc_apt_configure.handle(
+ "test", cfg, self.fakecloud, None, None
+ )
+
+ calls = (
+ call(
+ "add",
+ output_file=pathlib.Path(self.aptlistfile).stem,
+ data="fakekey 1212",
+ hardened=False,
+ ),
+ )
+ mockobj.assert_has_calls(calls, any_order=True)
+
+ # filename should be ignored on key only
+ self.assertFalse(os.path.isfile(self.aptlistfile))
+
+ def apt_src_keyid_real(self, cfg, expectedkey, is_hardened=None):
+ """apt_src_keyid_real
+ Test specification of a keyid without source including
+ up to addition of the key (add_apt_key_raw mocked to keep the
+ environment as is)
+ """
+ key = cfg["keyid"]
+ keyserver = cfg.get("keyserver", "keyserver.ubuntu.com")
+ cfg = self.wrapv1conf([cfg])
+
+ with mock.patch.object(cc_apt_configure, "add_apt_key_raw") as mockkey:
+ with mock.patch.object(
+ gpg, "getkeybyid", return_value=expectedkey
+ ) as mockgetkey:
+ cc_apt_configure.handle(
+ "test", cfg, self.fakecloud, None, None
+ )
+ if is_hardened is not None:
+ mockkey.assert_called_with(
+ expectedkey, self.aptlistfile, hardened=is_hardened
+ )
+ else:
+ mockkey.assert_called_with(expectedkey, self.aptlistfile)
+ mockgetkey.assert_called_with(key, keyserver)
+
+ # filename should be ignored on key only
+ self.assertFalse(os.path.isfile(self.aptlistfile))
+
+ def test_apt_src_keyid_real(self):
+ """test_apt_src_keyid_real - Test keyid including key add"""
+ keyid = "03683F77"
+ cfg = {"keyid": keyid, "filename": self.aptlistfile}
+
+ self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False)
+
+ def test_apt_src_longkeyid_real(self):
+ """test_apt_src_longkeyid_real - Test long keyid including key add"""
+ keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77"
+ cfg = {"keyid": keyid, "filename": self.aptlistfile}
+
+ self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False)
+
+ def test_apt_src_longkeyid_ks_real(self):
+ """test_apt_src_longkeyid_ks_real - Test long keyid from other ks"""
+ keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77"
+ cfg = {
+ "keyid": keyid,
+ "keyserver": "keys.gnupg.net",
+ "filename": self.aptlistfile,
+ }
+
+ self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False)
+
+ def test_apt_src_ppa(self):
+ """Test adding a ppa"""
+ cfg = {
+ "source": "ppa:smoser/cloud-init-test",
+ "filename": self.aptlistfile,
+ }
+ cfg = self.wrapv1conf([cfg])
+
+ with mock.patch.object(subp, "subp") as mockobj:
+ cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
+ mockobj.assert_called_once_with(
+ ["add-apt-repository", "ppa:smoser/cloud-init-test"], target=None
+ )
+
+ # adding ppa should ignore filename (uses add-apt-repository)
+ self.assertFalse(os.path.isfile(self.aptlistfile))
+
+ def test_apt_src_ppa_tri(self):
+ """Test adding three ppa's"""
+ cfg1 = {
+ "source": "ppa:smoser/cloud-init-test",
+ "filename": self.aptlistfile,
+ }
+ cfg2 = {
+ "source": "ppa:smoser/cloud-init-test2",
+ "filename": self.aptlistfile2,
+ }
+ cfg3 = {
+ "source": "ppa:smoser/cloud-init-test3",
+ "filename": self.aptlistfile3,
+ }
+ cfg = self.wrapv1conf([cfg1, cfg2, cfg3])
+
+ with mock.patch.object(subp, "subp") as mockobj:
+ cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
+ calls = [
+ call(
+ ["add-apt-repository", "ppa:smoser/cloud-init-test"],
+ target=None,
+ ),
+ call(
+ ["add-apt-repository", "ppa:smoser/cloud-init-test2"],
+ target=None,
+ ),
+ call(
+ ["add-apt-repository", "ppa:smoser/cloud-init-test3"],
+ target=None,
+ ),
+ ]
+ mockobj.assert_has_calls(calls, any_order=True)
+
+ # adding ppa should ignore all filenames (uses add-apt-repository)
+ self.assertFalse(os.path.isfile(self.aptlistfile))
+ self.assertFalse(os.path.isfile(self.aptlistfile2))
+ self.assertFalse(os.path.isfile(self.aptlistfile3))
+
+ def test_convert_to_new_format(self):
+ """Test the conversion of old to new format"""
+ cfg1 = {
+ "source": "deb $MIRROR $RELEASE multiverse",
+ "filename": self.aptlistfile,
+ }
+ cfg2 = {
+ "source": "deb $MIRROR $RELEASE main",
+ "filename": self.aptlistfile2,
+ }
+ cfg3 = {
+ "source": "deb $MIRROR $RELEASE universe",
+ "filename": self.aptlistfile3,
+ }
+ cfg = {"apt_sources": [cfg1, cfg2, cfg3]}
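+        # expected v3 layout: sources become a dict keyed by filename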
+ checkcfg = {
+ self.aptlistfile: {
+ "filename": self.aptlistfile,
+ "source": "deb $MIRROR $RELEASE multiverse",
+ },
+ self.aptlistfile2: {
+ "filename": self.aptlistfile2,
+ "source": "deb $MIRROR $RELEASE main",
+ },
+ self.aptlistfile3: {
+ "filename": self.aptlistfile3,
+ "source": "deb $MIRROR $RELEASE universe",
+ },
+ }
+
+ newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg)
+ self.assertEqual(newcfg["apt"]["sources"], checkcfg)
+
+ # convert again, should stay the same
+ newcfg2 = cc_apt_configure.convert_to_v3_apt_format(newcfg)
+ self.assertEqual(newcfg2["apt"]["sources"], checkcfg)
+
+ # should work without raising an exception
+ cc_apt_configure.convert_to_v3_apt_format({})
+
+ with self.assertRaises(ValueError):
+ cc_apt_configure.convert_to_v3_apt_format({"apt_sources": 5})
+
+ def test_convert_to_new_format_collision(self):
+ """Test the conversion of old to new format with collisions
+ That matches e.g. the MAAS case specifying old and new config"""
+ cfg_1_and_3 = {
+ "apt": {"proxy": "http://192.168.122.1:8000/"},
+ "apt_proxy": "http://192.168.122.1:8000/",
+ }
+ cfg_3_only = {"apt": {"proxy": "http://192.168.122.1:8000/"}}
+ cfgconflict = {
+ "apt": {"proxy": "http://192.168.122.1:8000/"},
+ "apt_proxy": "ftp://192.168.122.1:8000/",
+ }
+
+ # collision (equal)
+ newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3)
+ self.assertEqual(newcfg, cfg_3_only)
+ # collision (equal, so ok to remove)
+ newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_3_only)
+ self.assertEqual(newcfg, cfg_3_only)
+ # collision (unequal)
+ match = "Old and New.*unequal.*apt_proxy"
+ with self.assertRaisesRegex(ValueError, match):
+ cc_apt_configure.convert_to_v3_apt_format(cfgconflict)
+
+ def test_convert_to_new_format_dict_collision(self):
+ cfg1 = {
+ "source": "deb $MIRROR $RELEASE multiverse",
+ "filename": self.aptlistfile,
+ }
+ cfg2 = {
+ "source": "deb $MIRROR $RELEASE main",
+ "filename": self.aptlistfile2,
+ }
+ cfg3 = {
+ "source": "deb $MIRROR $RELEASE universe",
+ "filename": self.aptlistfile3,
+ }
+ fullv3 = {
+ self.aptlistfile: {
+ "filename": self.aptlistfile,
+ "source": "deb $MIRROR $RELEASE multiverse",
+ },
+ self.aptlistfile2: {
+ "filename": self.aptlistfile2,
+ "source": "deb $MIRROR $RELEASE main",
+ },
+ self.aptlistfile3: {
+ "filename": self.aptlistfile3,
+ "source": "deb $MIRROR $RELEASE universe",
+ },
+ }
+ cfg_3_only = {"apt": {"sources": fullv3}}
+ cfg_1_and_3 = {"apt_sources": [cfg1, cfg2, cfg3]}
+ cfg_1_and_3.update(cfg_3_only)
+
+ # collision (equal, so ok to remove)
+ newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3)
+ self.assertEqual(newcfg, cfg_3_only)
+ # no old spec (same result)
+ newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_3_only)
+ self.assertEqual(newcfg, cfg_3_only)
+
+ diff = {
+ self.aptlistfile: {
+ "filename": self.aptlistfile,
+ "source": "deb $MIRROR $RELEASE DIFFERENTVERSE",
+ },
+ self.aptlistfile2: {
+ "filename": self.aptlistfile2,
+ "source": "deb $MIRROR $RELEASE main",
+ },
+ self.aptlistfile3: {
+ "filename": self.aptlistfile3,
+ "source": "deb $MIRROR $RELEASE universe",
+ },
+ }
+ cfg_3_only = {"apt": {"sources": diff}}
+ cfg_1_and_3_different = {"apt_sources": [cfg1, cfg2, cfg3]}
+ cfg_1_and_3_different.update(cfg_3_only)
+
+ # collision (unequal by dict having a different entry)
+ with self.assertRaises(ValueError):
+ cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3_different)
+
+ missing = {
+ self.aptlistfile: {
+ "filename": self.aptlistfile,
+ "source": "deb $MIRROR $RELEASE multiverse",
+ }
+ }
+ cfg_3_only = {"apt": {"sources": missing}}
+ cfg_1_and_3_missing = {"apt_sources": [cfg1, cfg2, cfg3]}
+ cfg_1_and_3_missing.update(cfg_3_only)
+ # collision (unequal by dict missing an entry)
+ with self.assertRaises(ValueError):
+ cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3_missing)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_apt_source_v3.py b/tests/unittests/config/test_apt_source_v3.py
new file mode 100644
index 00000000..75adc647
--- /dev/null
+++ b/tests/unittests/config/test_apt_source_v3.py
@@ -0,0 +1,1442 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""test_handler_apt_source_v3
+Testing various config variations of the apt_source custom config
+This tries to call all in the new v3 format and cares about new features
+"""
+import glob
+import os
+import pathlib
+import re
+import shutil
+import socket
+import tempfile
+from unittest import TestCase, mock
+from unittest.mock import call
+
+from cloudinit import gpg, subp, util
+from cloudinit.config import cc_apt_configure
+from tests.unittests import helpers as t_help
+from tests.unittests.util import get_cloud
+
+EXPECTEDKEY = """-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1
+
+mI0ESuZLUgEEAKkqq3idtFP7g9hzOu1a8+v8ImawQN4TrvlygfScMU1TIS1eC7UQ
+NUA8Qqgr9iUaGnejb0VciqftLrU9D6WYHSKz+EITefgdyJ6SoQxjoJdsCpJ7o9Jy
+8PQnpRttiFm4qHu6BVnKnBNxw/z3ST9YMqW5kbMQpfxbGe+obRox59NpABEBAAG0
+HUxhdW5jaHBhZCBQUEEgZm9yIFNjb3R0IE1vc2VyiLYEEwECACAFAkrmS1ICGwMG
+CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRAGILvPA2g/d3aEA/9tVjc10HOZwV29
+OatVuTeERjjrIbxflO586GLA8cp0C9RQCwgod/R+cKYdQcHjbqVcP0HqxveLg0RZ
+FJpWLmWKamwkABErwQLGlM/Hwhjfade8VvEQutH5/0JgKHmzRsoqfR+LMO6OS+Sm
+S0ORP6HXET3+jC8BMG4tBWCTK/XEZw==
+=ACB2
+-----END PGP PUBLIC KEY BLOCK-----"""
+
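+# matches source specs handled via add-apt-repository, e.g. "ppa:user/name"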
+ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
+
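+# TARGET=None means no alternate root is used; the tests point generated
+# file names at per-test temporary directories instead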
+TARGET = None
+
+MOCK_LSB_RELEASE_DATA = {
+ "id": "Ubuntu",
+ "description": "Ubuntu 18.04.1 LTS",
+ "release": "18.04",
+ "codename": "bionic",
+}
+
+
+class FakeDatasource:
+ """Fake Datasource helper object"""
+
+ def __init__(self):
+ self.region = "region"
+
+
+class FakeCloud:
+ """Fake Cloud helper object"""
+
+ def __init__(self):
+ self.datasource = FakeDatasource()
+
+
+class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
+ """TestAptSourceConfig
+ Main Class to test apt configs
+ """
+
+ def setUp(self):
+ super(TestAptSourceConfig, self).setUp()
+ self.tmp = tempfile.mkdtemp()
+ self.new_root = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, self.tmp)
+ self.addCleanup(shutil.rmtree, self.new_root)
+ self.aptlistfile = os.path.join(self.tmp, "single-deb.list")
+ self.aptlistfile2 = os.path.join(self.tmp, "single-deb2.list")
+ self.aptlistfile3 = os.path.join(self.tmp, "single-deb3.list")
+ self.join = os.path.join
+ self.matcher = re.compile(ADD_APT_REPO_MATCH).search
+ self.add_patch(
+ "cloudinit.config.cc_apt_configure.util.lsb_release",
+ "m_lsb_release",
+ return_value=MOCK_LSB_RELEASE_DATA.copy(),
+ )
+
+ @staticmethod
+ def _add_apt_sources(*args, **kwargs):
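+        # update_packages is mocked out so that no real package update runs
+        # while the sources are being added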
+ with mock.patch.object(cc_apt_configure, "update_packages"):
+ cc_apt_configure.add_apt_sources(*args, **kwargs)
+
+ @staticmethod
+ def _get_default_params():
+ """get_default_params
+        Get the most basic default mirror and release info to be used in tests
+ """
+ params = {}
+ params["RELEASE"] = MOCK_LSB_RELEASE_DATA["release"]
+ arch = "amd64"
+ params["MIRROR"] = cc_apt_configure.get_default_mirrors(arch)[
+ "PRIMARY"
+ ]
+ return params
+
+ def _myjoin(self, *args, **kwargs):
+        """_myjoin - redirect sources.list.d writes into the tmpdir"""
+ if (
+ args[0] == "/etc/apt/sources.list.d/"
+ and args[1] == "cloud_config_sources.list"
+ and len(args) == 2
+ ):
+ return self.join(self.tmp, args[0].lstrip("/"), args[1])
+ else:
+ return self.join(*args, **kwargs)
+
+ def _apt_src_basic(self, filename, cfg):
+ """_apt_src_basic
+        Test fixed deb source string, overriding the mirror conf in params
+ """
+ params = self._get_default_params()
+
+ self._add_apt_sources(
+ cfg, TARGET, template_params=params, aa_repo_match=self.matcher
+ )
+
+ self.assertTrue(os.path.isfile(filename))
+
+ contents = util.load_file(filename)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % (
+ "deb",
+ "http://test.ubuntu.com/ubuntu",
+ "karmic-backports",
+ "main universe multiverse restricted",
+ ),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+
+ def test_apt_v3_src_basic(self):
+ """test_apt_v3_src_basic - Test fix deb source string"""
+ cfg = {
+ self.aptlistfile: {
+ "source": (
+ "deb http://test.ubuntu.com/ubuntu"
+ " karmic-backports"
+ " main universe multiverse restricted"
+ )
+ }
+ }
+ self._apt_src_basic(self.aptlistfile, cfg)
+
+ def test_apt_v3_src_basic_tri(self):
+ """test_apt_v3_src_basic_tri - Test multiple fix deb source strings"""
+ cfg = {
+ self.aptlistfile: {
+ "source": (
+ "deb http://test.ubuntu.com/ubuntu"
+ " karmic-backports"
+ " main universe multiverse restricted"
+ )
+ },
+ self.aptlistfile2: {
+ "source": (
+ "deb http://test.ubuntu.com/ubuntu"
+ " precise-backports"
+ " main universe multiverse restricted"
+ )
+ },
+ self.aptlistfile3: {
+ "source": (
+ "deb http://test.ubuntu.com/ubuntu"
+ " lucid-backports"
+ " main universe multiverse restricted"
+ )
+ },
+ }
+ self._apt_src_basic(self.aptlistfile, cfg)
+
+ # extra verify on two extra files of this test
+ contents = util.load_file(self.aptlistfile2)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % (
+ "deb",
+ "http://test.ubuntu.com/ubuntu",
+ "precise-backports",
+ "main universe multiverse restricted",
+ ),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+ contents = util.load_file(self.aptlistfile3)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % (
+ "deb",
+ "http://test.ubuntu.com/ubuntu",
+ "lucid-backports",
+ "main universe multiverse restricted",
+ ),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+
+ def _apt_src_replacement(self, filename, cfg):
+        """apt_src_replacement
+ Test Autoreplacement of MIRROR and RELEASE in source specs
+ """
+ params = self._get_default_params()
+ self._add_apt_sources(
+ cfg, TARGET, template_params=params, aa_repo_match=self.matcher
+ )
+
+ self.assertTrue(os.path.isfile(filename))
+
+ contents = util.load_file(filename)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % ("deb", params["MIRROR"], params["RELEASE"], "multiverse"),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+
+ def test_apt_v3_src_replace(self):
+ """test_apt_v3_src_replace - Test replacement of MIRROR & RELEASE"""
+ cfg = {self.aptlistfile: {"source": "deb $MIRROR $RELEASE multiverse"}}
+ self._apt_src_replacement(self.aptlistfile, cfg)
+
+ def test_apt_v3_src_replace_fn(self):
+ """test_apt_v3_src_replace_fn - Test filename overwritten in dict"""
+ cfg = {
+ "ignored": {
+ "source": "deb $MIRROR $RELEASE multiverse",
+ "filename": self.aptlistfile,
+ }
+ }
+        # the explicit 'filename' value should overwrite the dict key
+ self._apt_src_replacement(self.aptlistfile, cfg)
+
+ def _apt_src_replace_tri(self, cfg):
+ """_apt_src_replace_tri
+ Test three autoreplacements of MIRROR and RELEASE in source specs with
+ generic part
+ """
+ self._apt_src_replacement(self.aptlistfile, cfg)
+
+ # extra verify on two extra files of this test
+ params = self._get_default_params()
+ contents = util.load_file(self.aptlistfile2)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % ("deb", params["MIRROR"], params["RELEASE"], "main"),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+ contents = util.load_file(self.aptlistfile3)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % ("deb", params["MIRROR"], params["RELEASE"], "universe"),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+
+ def test_apt_v3_src_replace_tri(self):
+ """test_apt_v3_src_replace_tri - Test multiple replace/overwrites"""
+ cfg = {
+ self.aptlistfile: {"source": "deb $MIRROR $RELEASE multiverse"},
+ "notused": {
+ "source": "deb $MIRROR $RELEASE main",
+ "filename": self.aptlistfile2,
+ },
+ self.aptlistfile3: {"source": "deb $MIRROR $RELEASE universe"},
+ }
+ self._apt_src_replace_tri(cfg)
+
+ def _apt_src_keyid(self, filename, cfg, keynum, is_hardened=None):
+ """_apt_src_keyid
+ Test specification of a source + keyid
+ """
+ params = self._get_default_params()
+
+ with mock.patch.object(cc_apt_configure, "add_apt_key") as mockobj:
+ self._add_apt_sources(
+ cfg, TARGET, template_params=params, aa_repo_match=self.matcher
+ )
+
+ # check if it added the right number of keys
+ calls = []
+ for key in cfg:
+ if is_hardened is not None:
+ calls.append(call(cfg[key], hardened=is_hardened))
+ else:
+ calls.append(call(cfg[key], TARGET))
+
+ mockobj.assert_has_calls(calls, any_order=True)
+
+ self.assertTrue(os.path.isfile(filename))
+
+ contents = util.load_file(filename)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % (
+ "deb",
+ "http://ppa.launchpad.net/smoser/cloud-init-test/ubuntu",
+ "xenial",
+ "main",
+ ),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+
+ def test_apt_v3_src_keyid(self):
+ """test_apt_v3_src_keyid - Test source + keyid with filename"""
+ cfg = {
+ self.aptlistfile: {
+ "source": (
+ "deb "
+ "http://ppa.launchpad.net/"
+ "smoser/cloud-init-test/ubuntu"
+ " xenial main"
+ ),
+ "filename": self.aptlistfile,
+ "keyid": "03683F77",
+ }
+ }
+ self._apt_src_keyid(self.aptlistfile, cfg, 1)
+
+ def test_apt_v3_src_keyid_tri(self):
+ """test_apt_v3_src_keyid_tri - Test multiple src+key+filen writes"""
+ cfg = {
+ self.aptlistfile: {
+ "source": (
+ "deb "
+ "http://ppa.launchpad.net/"
+ "smoser/cloud-init-test/ubuntu"
+ " xenial main"
+ ),
+ "keyid": "03683F77",
+ },
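+            # the explicit 'filename' below overrides the 'ignored' dict key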
+ "ignored": {
+ "source": (
+ "deb "
+ "http://ppa.launchpad.net/"
+ "smoser/cloud-init-test/ubuntu"
+ " xenial universe"
+ ),
+ "keyid": "03683F77",
+ "filename": self.aptlistfile2,
+ },
+ self.aptlistfile3: {
+ "source": (
+ "deb "
+ "http://ppa.launchpad.net/"
+ "smoser/cloud-init-test/ubuntu"
+ " xenial multiverse"
+ ),
+ "filename": self.aptlistfile3,
+ "keyid": "03683F77",
+ },
+ }
+
+ self._apt_src_keyid(self.aptlistfile, cfg, 3)
+ contents = util.load_file(self.aptlistfile2)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % (
+ "deb",
+ "http://ppa.launchpad.net/smoser/cloud-init-test/ubuntu",
+ "xenial",
+ "universe",
+ ),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+ contents = util.load_file(self.aptlistfile3)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % (
+ "deb",
+ "http://ppa.launchpad.net/smoser/cloud-init-test/ubuntu",
+ "xenial",
+ "multiverse",
+ ),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+
+ def test_apt_v3_src_key(self):
+ """test_apt_v3_src_key - Test source + key"""
+ params = self._get_default_params()
+ cfg = {
+ self.aptlistfile: {
+ "source": (
+ "deb "
+ "http://ppa.launchpad.net/"
+ "smoser/cloud-init-test/ubuntu"
+ " xenial main"
+ ),
+ "filename": self.aptlistfile,
+ "key": "fakekey 4321",
+ }
+ }
+
+ with mock.patch.object(cc_apt_configure, "apt_key") as mockobj:
+ self._add_apt_sources(
+ cfg, TARGET, template_params=params, aa_repo_match=self.matcher
+ )
+
+ calls = (
+ call(
+ "add",
+ output_file=pathlib.Path(self.aptlistfile).stem,
+ data="fakekey 4321",
+ hardened=False,
+ ),
+ )
+ mockobj.assert_has_calls(calls, any_order=True)
+ self.assertTrue(os.path.isfile(self.aptlistfile))
+
+ contents = util.load_file(self.aptlistfile)
+ self.assertTrue(
+ re.search(
+ r"%s %s %s %s\n"
+ % (
+ "deb",
+ "http://ppa.launchpad.net/smoser/cloud-init-test/ubuntu",
+ "xenial",
+ "main",
+ ),
+ contents,
+ flags=re.IGNORECASE,
+ )
+ )
+
+ def test_apt_v3_src_keyonly(self):
+ """test_apt_v3_src_keyonly - Test key without source"""
+ params = self._get_default_params()
+ cfg = {self.aptlistfile: {"key": "fakekey 4242"}}
+
+ with mock.patch.object(cc_apt_configure, "apt_key") as mockobj:
+ self._add_apt_sources(
+ cfg, TARGET, template_params=params, aa_repo_match=self.matcher
+ )
+
+ calls = (
+ call(
+ "add",
+ output_file=pathlib.Path(self.aptlistfile).stem,
+ data="fakekey 4242",
+ hardened=False,
+ ),
+ )
+ mockobj.assert_has_calls(calls, any_order=True)
+
+ # filename should be ignored on key only
+ self.assertFalse(os.path.isfile(self.aptlistfile))
+
+ def test_apt_v3_src_keyidonly(self):
+ """test_apt_v3_src_keyidonly - Test keyid without source"""
+ params = self._get_default_params()
+ cfg = {self.aptlistfile: {"keyid": "03683F77"}}
+ with mock.patch.object(
+ subp, "subp", return_value=("fakekey 1212", "")
+ ):
+ with mock.patch.object(cc_apt_configure, "apt_key") as mockobj:
+ self._add_apt_sources(
+ cfg,
+ TARGET,
+ template_params=params,
+ aa_repo_match=self.matcher,
+ )
+
+ calls = (
+ call(
+ "add",
+ output_file=pathlib.Path(self.aptlistfile).stem,
+ data="fakekey 1212",
+ hardened=False,
+ ),
+ )
+ mockobj.assert_has_calls(calls, any_order=True)
+
+ # filename should be ignored on key only
+ self.assertFalse(os.path.isfile(self.aptlistfile))
+
+ def apt_src_keyid_real(self, cfg, expectedkey, is_hardened=None):
+ """apt_src_keyid_real
+ Test specification of a keyid without source including
+ up to addition of the key (add_apt_key_raw mocked to keep the
+ environment as is)
+ """
+ params = self._get_default_params()
+
+ with mock.patch.object(cc_apt_configure, "add_apt_key_raw") as mockkey:
+ with mock.patch.object(
+ gpg, "getkeybyid", return_value=expectedkey
+ ) as mockgetkey:
+ self._add_apt_sources(
+ cfg,
+ TARGET,
+ template_params=params,
+ aa_repo_match=self.matcher,
+ )
+
+ keycfg = cfg[self.aptlistfile]
+ mockgetkey.assert_called_with(
+ keycfg["keyid"], keycfg.get("keyserver", "keyserver.ubuntu.com")
+ )
+ if is_hardened is not None:
+ mockkey.assert_called_with(
+ expectedkey, keycfg["keyfile"], hardened=is_hardened
+ )
+
+ # filename should be ignored on key only
+ self.assertFalse(os.path.isfile(self.aptlistfile))
+
+ def test_apt_v3_src_keyid_real(self):
+ """test_apt_v3_src_keyid_real - Test keyid including key add"""
+ keyid = "03683F77"
+ cfg = {self.aptlistfile: {"keyid": keyid, "keyfile": self.aptlistfile}}
+
+ self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False)
+
+ def test_apt_v3_src_longkeyid_real(self):
+ """test_apt_v3_src_longkeyid_real Test long keyid including key add"""
+ keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77"
+ cfg = {self.aptlistfile: {"keyid": keyid, "keyfile": self.aptlistfile}}
+
+ self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False)
+
+ def test_apt_v3_src_longkeyid_ks_real(self):
+ """test_apt_v3_src_longkeyid_ks_real Test long keyid from other ks"""
+ keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77"
+ cfg = {
+ self.aptlistfile: {
+ "keyid": keyid,
+ "keyfile": self.aptlistfile,
+ "keyserver": "keys.gnupg.net",
+ }
+ }
+
+ self.apt_src_keyid_real(cfg, EXPECTEDKEY)
+
+ def test_apt_v3_src_keyid_keyserver(self):
+ """test_apt_v3_src_keyid_keyserver - Test custom keyserver"""
+ keyid = "03683F77"
+ params = self._get_default_params()
+ cfg = {
+ self.aptlistfile: {
+ "keyid": keyid,
+ "keyfile": self.aptlistfile,
+ "keyserver": "test.random.com",
+ }
+ }
+
+ # in some test environments only *.ubuntu.com is reachable
+ # so mock the call and check if the config got there
+ with mock.patch.object(
+ gpg, "getkeybyid", return_value="fakekey"
+ ) as mockgetkey:
+ with mock.patch.object(
+ cc_apt_configure, "add_apt_key_raw"
+ ) as mockadd:
+ self._add_apt_sources(
+ cfg,
+ TARGET,
+ template_params=params,
+ aa_repo_match=self.matcher,
+ )
+
+ mockgetkey.assert_called_with("03683F77", "test.random.com")
+ mockadd.assert_called_with("fakekey", self.aptlistfile, hardened=False)
+
+ # filename should be ignored on key only
+ self.assertFalse(os.path.isfile(self.aptlistfile))
+
+ def test_apt_v3_src_ppa(self):
+ """test_apt_v3_src_ppa - Test specification of a ppa"""
+ params = self._get_default_params()
+ cfg = {self.aptlistfile: {"source": "ppa:smoser/cloud-init-test"}}
+
+ with mock.patch("cloudinit.subp.subp") as mockobj:
+ self._add_apt_sources(
+ cfg, TARGET, template_params=params, aa_repo_match=self.matcher
+ )
+ mockobj.assert_any_call(
+ ["add-apt-repository", "ppa:smoser/cloud-init-test"], target=TARGET
+ )
+
+ # adding ppa should ignore filename (uses add-apt-repository)
+ self.assertFalse(os.path.isfile(self.aptlistfile))
+
+ def test_apt_v3_src_ppa_tri(self):
+ """test_apt_v3_src_ppa_tri - Test specification of multiple ppa's"""
+ params = self._get_default_params()
+ cfg = {
+ self.aptlistfile: {"source": "ppa:smoser/cloud-init-test"},
+ self.aptlistfile2: {"source": "ppa:smoser/cloud-init-test2"},
+ self.aptlistfile3: {"source": "ppa:smoser/cloud-init-test3"},
+ }
+
+ with mock.patch("cloudinit.subp.subp") as mockobj:
+ self._add_apt_sources(
+ cfg, TARGET, template_params=params, aa_repo_match=self.matcher
+ )
+ calls = [
+ call(
+ ["add-apt-repository", "ppa:smoser/cloud-init-test"],
+ target=TARGET,
+ ),
+ call(
+ ["add-apt-repository", "ppa:smoser/cloud-init-test2"],
+ target=TARGET,
+ ),
+ call(
+ ["add-apt-repository", "ppa:smoser/cloud-init-test3"],
+ target=TARGET,
+ ),
+ ]
+ mockobj.assert_has_calls(calls, any_order=True)
+
+ # adding ppa should ignore all filenames (uses add-apt-repository)
+ self.assertFalse(os.path.isfile(self.aptlistfile))
+ self.assertFalse(os.path.isfile(self.aptlistfile2))
+ self.assertFalse(os.path.isfile(self.aptlistfile3))
+
+ @mock.patch("cloudinit.config.cc_apt_configure.util.get_dpkg_architecture")
+ def test_apt_v3_list_rename(self, m_get_dpkg_architecture):
+ """test_apt_v3_list_rename - Test find mirror and apt list renaming"""
+ pre = "/var/lib/apt/lists"
+ # filenames are archive dependent
+
+ arch = "s390x"
+ m_get_dpkg_architecture.return_value = arch
+ component = "ubuntu-ports"
+ archive = "ports.ubuntu.com"
+
+ cfg = {
+ "primary": [
+ {
+ "arches": ["default"],
+ "uri": "http://test.ubuntu.com/%s/" % component,
+ }
+ ],
+ "security": [
+ {
+ "arches": ["default"],
+ "uri": "http://testsec.ubuntu.com/%s/" % component,
+ }
+ ],
+ }
+ post = "%s_dists_%s-updates_InRelease" % (
+ component,
+ MOCK_LSB_RELEASE_DATA["codename"],
+ )
+ fromfn = "%s/%s_%s" % (pre, archive, post)
+ tofn = "%s/test.ubuntu.com_%s" % (pre, post)
+
+ mirrors = cc_apt_configure.find_apt_mirror_info(cfg, FakeCloud(), arch)
+
+ self.assertEqual(
+ mirrors["MIRROR"], "http://test.ubuntu.com/%s/" % component
+ )
+ self.assertEqual(
+ mirrors["PRIMARY"], "http://test.ubuntu.com/%s/" % component
+ )
+ self.assertEqual(
+ mirrors["SECURITY"], "http://testsec.ubuntu.com/%s/" % component
+ )
+
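+        # rename_apt_lists should rename the cached list file from the
+        # default ports archive prefix to the new test mirror prefix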
+ with mock.patch.object(os, "rename") as mockren:
+ with mock.patch.object(glob, "glob", return_value=[fromfn]):
+ cc_apt_configure.rename_apt_lists(mirrors, TARGET, arch)
+
+ mockren.assert_any_call(fromfn, tofn)
+
+ @mock.patch("cloudinit.config.cc_apt_configure.util.get_dpkg_architecture")
+ def test_apt_v3_list_rename_non_slash(self, m_get_dpkg_architecture):
+ target = os.path.join(self.tmp, "rename_non_slash")
+ apt_lists_d = os.path.join(target, "./" + cc_apt_configure.APT_LISTS)
+
+ arch = "amd64"
+ m_get_dpkg_architecture.return_value = arch
+
+ mirror_path = "some/random/path/"
+ primary = "http://test.ubuntu.com/" + mirror_path
+ security = "http://test-security.ubuntu.com/" + mirror_path
+ mirrors = {"PRIMARY": primary, "SECURITY": security}
+
+ # these match default archive prefixes
+ opri_pre = "archive.ubuntu.com_ubuntu_dists_xenial"
+ osec_pre = "security.ubuntu.com_ubuntu_dists_xenial"
+        # this one matches neither default, so it should not be renamed
+ other_pre = "dl.google.com_linux_chrome_deb_dists_stable"
+ # these are our new expected prefixes
+ npri_pre = "test.ubuntu.com_some_random_path_dists_xenial"
+ nsec_pre = "test-security.ubuntu.com_some_random_path_dists_xenial"
+
+ files = [
+ # orig prefix, new prefix, suffix
+ (opri_pre, npri_pre, "_main_binary-amd64_Packages"),
+ (opri_pre, npri_pre, "_main_binary-amd64_InRelease"),
+ (opri_pre, npri_pre, "-updates_main_binary-amd64_Packages"),
+ (opri_pre, npri_pre, "-updates_main_binary-amd64_InRelease"),
+ (other_pre, other_pre, "_main_binary-amd64_Packages"),
+ (other_pre, other_pre, "_Release"),
+ (other_pre, other_pre, "_Release.gpg"),
+ (osec_pre, nsec_pre, "_InRelease"),
+ (osec_pre, nsec_pre, "_main_binary-amd64_Packages"),
+ (osec_pre, nsec_pre, "_universe_binary-amd64_Packages"),
+ ]
+
+ expected = sorted([npre + suff for opre, npre, suff in files])
+ # create files
+ for (opre, _npre, suff) in files:
+ fpath = os.path.join(apt_lists_d, opre + suff)
+ util.write_file(fpath, content=fpath)
+
+ cc_apt_configure.rename_apt_lists(mirrors, target, arch)
+ found = sorted(os.listdir(apt_lists_d))
+ self.assertEqual(expected, found)
+
+ @staticmethod
+ def test_apt_v3_proxy():
+ """test_apt_v3_proxy - Test apt_*proxy configuration"""
+ cfg = {
+ "proxy": "foobar1",
+ "http_proxy": "foobar2",
+ "ftp_proxy": "foobar3",
+ "https_proxy": "foobar4",
+ }
+
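+        # 'proxy' acts as an alias for the http proxy, so both values end up
+        # written as Acquire::http::Proxy entries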
+ with mock.patch.object(util, "write_file") as mockobj:
+ cc_apt_configure.apply_apt_config(cfg, "proxyfn", "notused")
+
+ mockobj.assert_called_with(
+ "proxyfn",
+ 'Acquire::http::Proxy "foobar1";\n'
+ 'Acquire::http::Proxy "foobar2";\n'
+ 'Acquire::ftp::Proxy "foobar3";\n'
+ 'Acquire::https::Proxy "foobar4";\n',
+ )
+
+ def test_apt_v3_mirror(self):
+ """test_apt_v3_mirror - Test defining a mirror"""
+ pmir = "http://us.archive.ubuntu.com/ubuntu/"
+ smir = "http://security.ubuntu.com/ubuntu/"
+ cfg = {
+ "primary": [{"arches": ["default"], "uri": pmir}],
+ "security": [{"arches": ["default"], "uri": smir}],
+ }
+
+ mirrors = cc_apt_configure.find_apt_mirror_info(
+ cfg, FakeCloud(), "amd64"
+ )
+
+ self.assertEqual(mirrors["MIRROR"], pmir)
+ self.assertEqual(mirrors["PRIMARY"], pmir)
+ self.assertEqual(mirrors["SECURITY"], smir)
+
+ def test_apt_v3_mirror_default(self):
+ """test_apt_v3_mirror_default - Test without defining a mirror"""
+ arch = "amd64"
+ default_mirrors = cc_apt_configure.get_default_mirrors(arch)
+ pmir = default_mirrors["PRIMARY"]
+ smir = default_mirrors["SECURITY"]
+ mycloud = get_cloud()
+ mirrors = cc_apt_configure.find_apt_mirror_info({}, mycloud, arch)
+
+ self.assertEqual(mirrors["MIRROR"], pmir)
+ self.assertEqual(mirrors["PRIMARY"], pmir)
+ self.assertEqual(mirrors["SECURITY"], smir)
+
+ def test_apt_v3_mirror_arches(self):
+ """test_apt_v3_mirror_arches - Test arches selection of mirror"""
+ pmir = "http://my-primary.ubuntu.com/ubuntu/"
+ smir = "http://my-security.ubuntu.com/ubuntu/"
+ arch = "ppc64el"
+ cfg = {
+ "primary": [
+ {"arches": ["default"], "uri": "notthis-primary"},
+ {"arches": [arch], "uri": pmir},
+ ],
+ "security": [
+ {"arches": ["default"], "uri": "nothis-security"},
+ {"arches": [arch], "uri": smir},
+ ],
+ }
+
+ mirrors = cc_apt_configure.find_apt_mirror_info(cfg, FakeCloud(), arch)
+
+ self.assertEqual(mirrors["PRIMARY"], pmir)
+ self.assertEqual(mirrors["MIRROR"], pmir)
+ self.assertEqual(mirrors["SECURITY"], smir)
+
+ def test_apt_v3_mirror_arches_default(self):
+        """test_apt_v3_mirror_arches_default - Test fallback to default arch"""
+ pmir = "http://us.archive.ubuntu.com/ubuntu/"
+ smir = "http://security.ubuntu.com/ubuntu/"
+ cfg = {
+ "primary": [
+ {"arches": ["default"], "uri": pmir},
+ {"arches": ["thisarchdoesntexist"], "uri": "notthis"},
+ ],
+ "security": [
+ {"arches": ["thisarchdoesntexist"], "uri": "nothat"},
+ {"arches": ["default"], "uri": smir},
+ ],
+ }
+
+ mirrors = cc_apt_configure.find_apt_mirror_info(
+ cfg, FakeCloud(), "amd64"
+ )
+
+ self.assertEqual(mirrors["MIRROR"], pmir)
+ self.assertEqual(mirrors["PRIMARY"], pmir)
+ self.assertEqual(mirrors["SECURITY"], smir)
+
+ @mock.patch("cloudinit.config.cc_apt_configure.util.get_dpkg_architecture")
+ def test_apt_v3_get_def_mir_non_intel_no_arch(
+ self, m_get_dpkg_architecture
+ ):
+ arch = "ppc64el"
+ m_get_dpkg_architecture.return_value = arch
+ expected = {
+ "PRIMARY": "http://ports.ubuntu.com/ubuntu-ports",
+ "SECURITY": "http://ports.ubuntu.com/ubuntu-ports",
+ }
+ self.assertEqual(expected, cc_apt_configure.get_default_mirrors())
+
+ def test_apt_v3_get_default_mirrors_non_intel_with_arch(self):
+ found = cc_apt_configure.get_default_mirrors("ppc64el")
+
+ expected = {
+ "PRIMARY": "http://ports.ubuntu.com/ubuntu-ports",
+ "SECURITY": "http://ports.ubuntu.com/ubuntu-ports",
+ }
+ self.assertEqual(expected, found)
+
+ def test_apt_v3_mirror_arches_sysdefault(self):
+        """test_apt_v3_mirror_arches_sysdefault - Fallback to sys default"""
+ arch = "amd64"
+ default_mirrors = cc_apt_configure.get_default_mirrors(arch)
+ pmir = default_mirrors["PRIMARY"]
+ smir = default_mirrors["SECURITY"]
+ mycloud = get_cloud()
+ cfg = {
+ "primary": [
+ {"arches": ["thisarchdoesntexist_64"], "uri": "notthis"},
+ {"arches": ["thisarchdoesntexist"], "uri": "notthiseither"},
+ ],
+ "security": [
+ {"arches": ["thisarchdoesntexist"], "uri": "nothat"},
+ {"arches": ["thisarchdoesntexist_64"], "uri": "nothateither"},
+ ],
+ }
+
+ mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch)
+
+ self.assertEqual(mirrors["MIRROR"], pmir)
+ self.assertEqual(mirrors["PRIMARY"], pmir)
+ self.assertEqual(mirrors["SECURITY"], smir)
+
+ def test_apt_v3_mirror_search(self):
+ """test_apt_v3_mirror_search - Test searching mirrors in a list
+ mock checks to avoid relying on network connectivity"""
+ pmir = "http://us.archive.ubuntu.com/ubuntu/"
+ smir = "http://security.ubuntu.com/ubuntu/"
+ cfg = {
+ "primary": [{"arches": ["default"], "search": ["pfailme", pmir]}],
+ "security": [{"arches": ["default"], "search": ["sfailme", smir]}],
+ }
+
+ with mock.patch.object(
+ cc_apt_configure.util,
+ "search_for_mirror",
+ side_effect=[pmir, smir],
+ ) as mocksearch:
+ mirrors = cc_apt_configure.find_apt_mirror_info(
+ cfg, FakeCloud(), "amd64"
+ )
+
+ calls = [call(["pfailme", pmir]), call(["sfailme", smir])]
+ mocksearch.assert_has_calls(calls)
+
+ self.assertEqual(mirrors["MIRROR"], pmir)
+ self.assertEqual(mirrors["PRIMARY"], pmir)
+ self.assertEqual(mirrors["SECURITY"], smir)
+
+ def test_apt_v3_mirror_search_many2(self):
+        """test_apt_v3_mirror_search_many2 - Test both mirror specs at once"""
+ pmir = "http://us.archive.ubuntu.com/ubuntu/"
+ smir = "http://security.ubuntu.com/ubuntu/"
+ cfg = {
+ "primary": [
+ {
+ "arches": ["default"],
+ "uri": pmir,
+ "search": ["pfailme", "foo"],
+ }
+ ],
+ "security": [
+ {
+ "arches": ["default"],
+ "uri": smir,
+ "search": ["sfailme", "bar"],
+ }
+ ],
+ }
+
+ arch = "amd64"
+
+ # should be called only once per type, despite two mirror configs
+ mycloud = None
+ with mock.patch.object(
+ cc_apt_configure, "get_mirror", return_value="http://mocked/foo"
+ ) as mockgm:
+ mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch)
+ calls = [
+ call(cfg, "primary", arch, mycloud),
+ call(cfg, "security", arch, mycloud),
+ ]
+ mockgm.assert_has_calls(calls)
+
+ # should not be called, since primary is specified
+ with mock.patch.object(
+ cc_apt_configure.util, "search_for_mirror"
+ ) as mockse:
+ mirrors = cc_apt_configure.find_apt_mirror_info(
+ cfg, FakeCloud(), arch
+ )
+ mockse.assert_not_called()
+
+ self.assertEqual(mirrors["MIRROR"], pmir)
+ self.assertEqual(mirrors["PRIMARY"], pmir)
+ self.assertEqual(mirrors["SECURITY"], smir)
+
+ def test_apt_v3_url_resolvable(self):
+ """test_apt_v3_url_resolvable - Test resolving urls"""
+
+ with mock.patch.object(util, "is_resolvable") as mockresolve:
+ util.is_resolvable_url("http://1.2.3.4/ubuntu")
+ mockresolve.assert_called_with("1.2.3.4")
+
+ with mock.patch.object(util, "is_resolvable") as mockresolve:
+ util.is_resolvable_url("http://us.archive.ubuntu.com/ubuntu")
+ mockresolve.assert_called_with("us.archive.ubuntu.com")
+
+        # former tests can leave this set (or not if the test is run directly)
+ # do a hard reset to ensure a stable result
+ util._DNS_REDIRECT_IP = None
+ bad = [(None, None, None, "badname", ["10.3.2.1"])]
+ good = [(None, None, None, "goodname", ["10.2.3.4"])]
+ with mock.patch.object(
+ socket, "getaddrinfo", side_effect=[bad, bad, bad, good, good]
+ ) as mocksock:
+ ret = util.is_resolvable_url("http://us.archive.ubuntu.com/ubuntu")
+ ret2 = util.is_resolvable_url("http://1.2.3.4/ubuntu")
+ mocksock.assert_any_call(
+ "does-not-exist.example.com.", None, 0, 0, 1, 2
+ )
+ mocksock.assert_any_call("example.invalid.", None, 0, 0, 1, 2)
+ mocksock.assert_any_call("us.archive.ubuntu.com", None)
+ mocksock.assert_any_call("1.2.3.4", None)
+
+ self.assertTrue(ret)
+ self.assertTrue(ret2)
+
+        # the side effect only needs a bad result after the initial call
+ with mock.patch.object(
+ socket, "getaddrinfo", side_effect=[bad]
+ ) as mocksock:
+ ret3 = util.is_resolvable_url("http://failme.com/ubuntu")
+ calls = [call("failme.com", None)]
+ mocksock.assert_has_calls(calls)
+ self.assertFalse(ret3)
+
+ def test_apt_v3_disable_suites(self):
+ """test_disable_suites - disable_suites with many configurations"""
+ release = "xenial"
+ orig = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+
+ # disable nothing
+ disabled = []
+ expect = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+ result = cc_apt_configure.disable_suites(disabled, orig, release)
+ self.assertEqual(expect, result)
+
+ # single disable release suite
+ disabled = ["$RELEASE"]
+ expect = """\
+# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+ result = cc_apt_configure.disable_suites(disabled, orig, release)
+ self.assertEqual(expect, result)
+
+ # single disable other suite
+ disabled = ["$RELEASE-updates"]
+ expect = (
+ """deb http://ubuntu.com//ubuntu xenial main
+# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu"""
+ """ xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+ )
+ result = cc_apt_configure.disable_suites(disabled, orig, release)
+ self.assertEqual(expect, result)
+
+ # multi disable
+ disabled = ["$RELEASE-updates", "$RELEASE-security"]
+ expect = (
+ """deb http://ubuntu.com//ubuntu xenial main
+# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
+ """xenial-updates main
+# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
+ """xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+ )
+ result = cc_apt_configure.disable_suites(disabled, orig, release)
+ self.assertEqual(expect, result)
+
+ # multi line disable (same suite multiple times in input)
+ disabled = ["$RELEASE-updates", "$RELEASE-security"]
+ orig = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://UBUNTU.com//ubuntu xenial-updates main
+deb http://UBUNTU.COM//ubuntu xenial-updates main
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+ expect = (
+ """deb http://ubuntu.com//ubuntu xenial main
+# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
+ """xenial-updates main
+# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
+ """xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+# suite disabled by cloud-init: deb http://UBUNTU.com//ubuntu """
+ """xenial-updates main
+# suite disabled by cloud-init: deb http://UBUNTU.COM//ubuntu """
+ """xenial-updates main
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+ )
+ result = cc_apt_configure.disable_suites(disabled, orig, release)
+ self.assertEqual(expect, result)
+
+ # comment in input
+ disabled = ["$RELEASE-updates", "$RELEASE-security"]
+ orig = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+#foo
+#deb http://UBUNTU.com//ubuntu xenial-updates main
+deb http://UBUNTU.COM//ubuntu xenial-updates main
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+ expect = (
+ """deb http://ubuntu.com//ubuntu xenial main
+# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
+ """xenial-updates main
+# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
+ """xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+#foo
+#deb http://UBUNTU.com//ubuntu xenial-updates main
+# suite disabled by cloud-init: deb http://UBUNTU.COM//ubuntu """
+ """xenial-updates main
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+ )
+ result = cc_apt_configure.disable_suites(disabled, orig, release)
+ self.assertEqual(expect, result)
+
+ # single disable custom suite
+ disabled = ["foobar"]
+ orig = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb http://ubuntu.com/ubuntu/ foobar main"""
+ expect = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+# suite disabled by cloud-init: deb http://ubuntu.com/ubuntu/ foobar main"""
+ result = cc_apt_configure.disable_suites(disabled, orig, release)
+ self.assertEqual(expect, result)
+
+ # single disable non existing suite
+ disabled = ["foobar"]
+ orig = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb http://ubuntu.com/ubuntu/ notfoobar main"""
+ expect = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb http://ubuntu.com/ubuntu/ notfoobar main"""
+ result = cc_apt_configure.disable_suites(disabled, orig, release)
+ self.assertEqual(expect, result)
+
+ # single disable suite with option
+ disabled = ["$RELEASE-updates"]
+ orig = """deb http://ubuntu.com//ubuntu xenial main
+deb [a=b] http://ubu.com//ubu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+ expect = (
+ """deb http://ubuntu.com//ubuntu xenial main
+# suite disabled by cloud-init: deb [a=b] http://ubu.com//ubu """
+ """xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+ )
+ result = cc_apt_configure.disable_suites(disabled, orig, release)
+ self.assertEqual(expect, result)
+
+ # single disable suite with more options and auto $RELEASE expansion
+ disabled = ["updates"]
+ orig = """deb http://ubuntu.com//ubuntu xenial main
+deb [a=b c=d] http://ubu.com//ubu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+ expect = """deb http://ubuntu.com//ubuntu xenial main
+# suite disabled by cloud-init: deb [a=b c=d] \
+http://ubu.com//ubu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+ result = cc_apt_configure.disable_suites(disabled, orig, release)
+ self.assertEqual(expect, result)
+
+ # single disable suite while options at others
+ disabled = ["$RELEASE-security"]
+ orig = """deb http://ubuntu.com//ubuntu xenial main
+deb [arch=foo] http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+ expect = (
+ """deb http://ubuntu.com//ubuntu xenial main
+deb [arch=foo] http://ubuntu.com//ubuntu xenial-updates main
+# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
+ """xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+ )
+ result = cc_apt_configure.disable_suites(disabled, orig, release)
+ self.assertEqual(expect, result)
+
+ def test_disable_suites_blank_lines(self):
+ """test_disable_suites_blank_lines - ensure blank lines allowed"""
+ lines = [
+ "deb %(repo)s %(rel)s main universe",
+ "",
+ "deb %(repo)s %(rel)s-updates main universe",
+ " # random comment",
+ "#comment here",
+ "",
+ ]
+ rel = "trusty"
+ repo = "http://example.com/mirrors/ubuntu"
+ orig = "\n".join(lines) % {"repo": repo, "rel": rel}
+ self.assertEqual(
+ orig, cc_apt_configure.disable_suites(["proposed"], orig, rel)
+ )
+
+ @mock.patch("cloudinit.util.get_hostname", return_value="abc.localdomain")
+ def test_apt_v3_mirror_search_dns(self, m_get_hostname):
+ """test_apt_v3_mirror_search_dns - Test searching dns patterns"""
+ pmir = "phit"
+ smir = "shit"
+ arch = "amd64"
+ mycloud = get_cloud("ubuntu")
+ cfg = {
+ "primary": [{"arches": ["default"], "search_dns": True}],
+ "security": [{"arches": ["default"], "search_dns": True}],
+ }
+
+ with mock.patch.object(
+ cc_apt_configure, "get_mirror", return_value="http://mocked/foo"
+ ) as mockgm:
+ mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch)
+ calls = [
+ call(cfg, "primary", arch, mycloud),
+ call(cfg, "security", arch, mycloud),
+ ]
+ mockgm.assert_has_calls(calls)
+
+ with mock.patch.object(
+ cc_apt_configure,
+ "search_for_mirror_dns",
+ return_value="http://mocked/foo",
+ ) as mocksdns:
+ mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch)
+ calls = [
+ call(True, "primary", cfg, mycloud),
+ call(True, "security", cfg, mycloud),
+ ]
+ mocksdns.assert_has_calls(calls)
+
+ # first return is for the non-dns call before
+ with mock.patch.object(
+ cc_apt_configure.util,
+ "search_for_mirror",
+ side_effect=[None, pmir, None, smir],
+ ) as mockse:
+ mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch)
+
+ calls = [
+ call(None),
+ call(
+ [
+ "http://ubuntu-mirror.localdomain/ubuntu",
+ "http://ubuntu-mirror/ubuntu",
+ ]
+ ),
+ call(None),
+ call(
+ [
+ "http://ubuntu-security-mirror.localdomain/ubuntu",
+ "http://ubuntu-security-mirror/ubuntu",
+ ]
+ ),
+ ]
+ mockse.assert_has_calls(calls)
+
+ self.assertEqual(mirrors["MIRROR"], pmir)
+ self.assertEqual(mirrors["PRIMARY"], pmir)
+ self.assertEqual(mirrors["SECURITY"], smir)
+
+ def test_apt_v3_add_mirror_keys(self):
+ """test_apt_v3_add_mirror_keys - Test adding key for mirrors"""
+ arch = "amd64"
+ cfg = {
+ "primary": [
+ {
+ "arches": [arch],
+ "uri": "http://test.ubuntu.com/",
+ "filename": "primary",
+ "key": "fakekey_primary",
+ }
+ ],
+ "security": [
+ {
+ "arches": [arch],
+ "uri": "http://testsec.ubuntu.com/",
+ "filename": "security",
+ "key": "fakekey_security",
+ }
+ ],
+ }
+
+ with mock.patch.object(cc_apt_configure, "add_apt_key_raw") as mockadd:
+ cc_apt_configure.add_mirror_keys(cfg, TARGET)
+ calls = [
+ mock.call("fakekey_primary", "primary", hardened=False),
+ mock.call("fakekey_security", "security", hardened=False),
+ ]
+ mockadd.assert_has_calls(calls, any_order=True)
+
+
+class TestDebconfSelections(TestCase):
+ @mock.patch("cloudinit.config.cc_apt_configure.subp.subp")
+ def test_set_sel_appends_newline_if_absent(self, m_subp):
+ """Automatically append a newline to debconf-set-selections config."""
+ selections = b"some/setting boolean true"
+ cc_apt_configure.debconf_set_selections(selections=selections)
+ cc_apt_configure.debconf_set_selections(selections=selections + b"\n")
+ m_call = mock.call(
+ ["debconf-set-selections"],
+ data=selections + b"\n",
+ capture=True,
+ target=None,
+ )
+ self.assertEqual([m_call, m_call], m_subp.call_args_list)
+
+ @mock.patch("cloudinit.config.cc_apt_configure.debconf_set_selections")
+ def test_no_set_sel_if_none_to_set(self, m_set_sel):
+ cc_apt_configure.apply_debconf_selections({"foo": "bar"})
+ m_set_sel.assert_not_called()
+
+ @mock.patch("cloudinit.config.cc_apt_configure.debconf_set_selections")
+ @mock.patch(
+ "cloudinit.config.cc_apt_configure.util.get_installed_packages"
+ )
+ def test_set_sel_call_has_expected_input(self, m_get_inst, m_set_sel):
+ data = {
+ "set1": "pkga pkga/q1 mybool false",
+ "set2": (
+ "pkgb\tpkgb/b1\tstr\tthis is a string\n"
+ "pkgc\tpkgc/ip\tstring\t10.0.0.1"
+ ),
+ }
+ lines = "\n".join(data.values()).split("\n")
+
+ m_get_inst.return_value = ["adduser", "apparmor"]
+ m_set_sel.return_value = None
+
+ cc_apt_configure.apply_debconf_selections({"debconf_selections": data})
+ self.assertTrue(m_get_inst.called)
+ self.assertEqual(m_set_sel.call_count, 1)
+
+ # assumes called with *args value.
+ selections = m_set_sel.call_args_list[0][0][0].decode()
+
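+        # every configured selection line must appear in the submitted data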
+ missing = [
+ line for line in lines if line not in selections.splitlines()
+ ]
+ self.assertEqual([], missing)
+
+ @mock.patch("cloudinit.config.cc_apt_configure.dpkg_reconfigure")
+ @mock.patch("cloudinit.config.cc_apt_configure.debconf_set_selections")
+ @mock.patch(
+ "cloudinit.config.cc_apt_configure.util.get_installed_packages"
+ )
+ def test_reconfigure_if_intersection(
+ self, m_get_inst, m_set_sel, m_dpkg_r
+ ):
+ data = {
+ "set1": "pkga pkga/q1 mybool false",
+ "set2": (
+ "pkgb\tpkgb/b1\tstr\tthis is a string\n"
+ "pkgc\tpkgc/ip\tstring\t10.0.0.1"
+ ),
+ "cloud-init": "cloud-init cloud-init/datasourcesmultiselect MAAS",
+ }
+
+ m_set_sel.return_value = None
+ m_get_inst.return_value = [
+ "adduser",
+ "apparmor",
+ "pkgb",
+ "cloud-init",
+ "zdog",
+ ]
+
+ cc_apt_configure.apply_debconf_selections({"debconf_selections": data})
+
+ # reconfigure should be called with the intersection
+ # of (packages in config, packages installed)
+ self.assertEqual(m_dpkg_r.call_count, 1)
+ # assumes called with *args (dpkg_reconfigure([a,b,c], target=))
+ packages = m_dpkg_r.call_args_list[0][0][0]
+ self.assertEqual(set(["cloud-init", "pkgb"]), set(packages))
+
+ @mock.patch("cloudinit.config.cc_apt_configure.dpkg_reconfigure")
+ @mock.patch("cloudinit.config.cc_apt_configure.debconf_set_selections")
+ @mock.patch(
+ "cloudinit.config.cc_apt_configure.util.get_installed_packages"
+ )
+ def test_reconfigure_if_no_intersection(
+ self, m_get_inst, m_set_sel, m_dpkg_r
+ ):
+ data = {"set1": "pkga pkga/q1 mybool false"}
+
+ m_get_inst.return_value = [
+ "adduser",
+ "apparmor",
+ "pkgb",
+ "cloud-init",
+ "zdog",
+ ]
+ m_set_sel.return_value = None
+
+ cc_apt_configure.apply_debconf_selections({"debconf_selections": data})
+
+ self.assertTrue(m_get_inst.called)
+ self.assertEqual(m_dpkg_r.call_count, 0)
+
+ @mock.patch("cloudinit.config.cc_apt_configure.subp.subp")
+ def test_dpkg_reconfigure_does_reconfigure(self, m_subp):
+ target = "/foo-target"
+
+ # due to the way the cleaners are called (via dictionary reference)
+ # mocking clean_cloud_init directly does not work. So we mock
+ # the CONFIG_CLEANERS dictionary and assert our cleaner is called.
+ ci_cleaner = mock.MagicMock()
+ with mock.patch.dict(
+ "cloudinit.config.cc_apt_configure.CONFIG_CLEANERS",
+ values={"cloud-init": ci_cleaner},
+ clear=True,
+ ):
+ cc_apt_configure.dpkg_reconfigure(
+ ["pkga", "cloud-init"], target=target
+ )
+ # cloud-init is actually the only package we have a cleaner for
+ # so for now, it's the only one that should be reconfigured
+ self.assertTrue(m_subp.called)
+ ci_cleaner.assert_called_with(target)
+ self.assertEqual(m_subp.call_count, 1)
+ found = m_subp.call_args_list[0][0][0]
+ expected = [
+ "dpkg-reconfigure",
+ "--frontend=noninteractive",
+ "cloud-init",
+ ]
+ self.assertEqual(expected, found)
+
+ @mock.patch("cloudinit.config.cc_apt_configure.subp.subp")
+ def test_dpkg_reconfigure_not_done_on_no_data(self, m_subp):
+ cc_apt_configure.dpkg_reconfigure([])
+ m_subp.assert_not_called()
+
+ @mock.patch("cloudinit.config.cc_apt_configure.subp.subp")
+ def test_dpkg_reconfigure_not_done_if_no_cleaners(self, m_subp):
+ cc_apt_configure.dpkg_reconfigure(["pkgfoo", "pkgbar"])
+ m_subp.assert_not_called()
+
+
+#
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_apk_configure.py b/tests/unittests/config/test_cc_apk_configure.py
new file mode 100644
index 00000000..85dd028f
--- /dev/null
+++ b/tests/unittests/config/test_cc_apk_configure.py
@@ -0,0 +1,410 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+""" test_apk_configure
+Test creation of repositories file
+"""
+
+import logging
+import os
+import re
+import textwrap
+
+import pytest
+
+from cloudinit import cloud, helpers, util
+from cloudinit.config import cc_apk_configure
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import (
+ FilesystemMockingTestCase,
+ mock,
+ skipUnlessJsonSchema,
+)
+
+REPO_FILE = "/etc/apk/repositories"
+DEFAULT_MIRROR_URL = "https://alpine.global.ssl.fastly.net/alpine"
+CC_APK = "cloudinit.config.cc_apk_configure"
+
+
+class TestNoConfig(FilesystemMockingTestCase):
+ def setUp(self):
+ super(TestNoConfig, self).setUp()
+ self.add_patch(CC_APK + "._write_repositories_file", "m_write_repos")
+ self.name = "apk-configure"
+ self.cloud_init = None
+ self.log = logging.getLogger("TestNoConfig")
+ self.args = []
+
+ def test_no_config(self):
+ """
+ Test that nothing is done if no apk-configure
+ configuration is provided.
+ """
+ config = util.get_builtin_cfg()
+
+ cc_apk_configure.handle(
+ self.name, config, self.cloud_init, self.log, self.args
+ )
+
+ self.assertEqual(0, self.m_write_repos.call_count)
+
+
+class TestConfig(FilesystemMockingTestCase):
+ def setUp(self):
+ super(TestConfig, self).setUp()
+ self.new_root = self.tmp_dir()
+ self.new_root = self.reRoot(root=self.new_root)
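+ # reRoot() redirects util/os file operations under the temporary
+ # root, so REPO_FILE is written below new_root rather than /etc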
+ for dirname in ["tmp", "etc/apk"]:
+ util.ensure_dir(os.path.join(self.new_root, dirname))
+ self.paths = helpers.Paths({"templates_dir": self.new_root})
+ self.name = "apk-configure"
+ self.cloud = cloud.Cloud(None, self.paths, None, None, None)
+ self.log = logging.getLogger("TestNoConfig")
+ self.args = []
+
+ @mock.patch(CC_APK + "._write_repositories_file")
+ def test_no_repo_settings(self, m_write_repos):
+ """
+ Test that nothing is written if the 'alpine_repo' key
+ is not present.
+ """
+ config = {"apk_repos": {}}
+
+ cc_apk_configure.handle(
+ self.name, config, self.cloud, self.log, self.args
+ )
+
+ self.assertEqual(0, m_write_repos.call_count)
+
+ @mock.patch(CC_APK + "._write_repositories_file")
+ def test_empty_repo_settings(self, m_write_repos):
+ """
+ Test that nothing is written if 'alpine_repo' list is empty.
+ """
+ config = {"apk_repos": {"alpine_repo": []}}
+
+ cc_apk_configure.handle(
+ self.name, config, self.cloud, self.log, self.args
+ )
+
+ self.assertEqual(0, m_write_repos.call_count)
+
+ def test_only_main_repo(self):
+ """
+ Test when only details of the main repo are written to file.
+ """
+ alpine_version = "v3.12"
+ config = {"apk_repos": {"alpine_repo": {"version": alpine_version}}}
+
+ cc_apk_configure.handle(
+ self.name, config, self.cloud, self.log, self.args
+ )
+
+ expected_content = textwrap.dedent(
+ """\
+ #
+ # Created by cloud-init
+ #
+ # This file is written on first boot of an instance
+ #
+
+ {0}/{1}/main
+
+ """.format(
+ DEFAULT_MIRROR_URL, alpine_version
+ )
+ )
+
+ self.assertEqual(expected_content, util.load_file(REPO_FILE))
+
+ def test_main_and_community_repos(self):
+ """
+ Test when only details of main and community repos are
+ written to file.
+ """
+ alpine_version = "edge"
+ config = {
+ "apk_repos": {
+ "alpine_repo": {
+ "version": alpine_version,
+ "community_enabled": True,
+ }
+ }
+ }
+
+ cc_apk_configure.handle(
+ self.name, config, self.cloud, self.log, self.args
+ )
+
+ expected_content = textwrap.dedent(
+ """\
+ #
+ # Created by cloud-init
+ #
+ # This file is written on first boot of an instance
+ #
+
+ {0}/{1}/main
+ {0}/{1}/community
+
+ """.format(
+ DEFAULT_MIRROR_URL, alpine_version
+ )
+ )
+
+ self.assertEqual(expected_content, util.load_file(REPO_FILE))
+
+ def test_main_community_testing_repos(self):
+ """
+ Test when details of main, community and testing repos
+ are written to file.
+ """
+ alpine_version = "v3.12"
+ config = {
+ "apk_repos": {
+ "alpine_repo": {
+ "version": alpine_version,
+ "community_enabled": True,
+ "testing_enabled": True,
+ }
+ }
+ }
+
+ cc_apk_configure.handle(
+ self.name, config, self.cloud, self.log, self.args
+ )
+
+ expected_content = textwrap.dedent(
+ """\
+ #
+ # Created by cloud-init
+ #
+ # This file is written on first boot of an instance
+ #
+
+ {0}/{1}/main
+ {0}/{1}/community
+ #
+ # Testing - using with non-Edge installation may cause problems!
+ #
+ {0}/edge/testing
+
+ """.format(
+ DEFAULT_MIRROR_URL, alpine_version
+ )
+ )
+
+ self.assertEqual(expected_content, util.load_file(REPO_FILE))
+
+ def test_edge_main_community_testing_repos(self):
+ """
+ Test when details of main, community and testing repos
+ for Edge version of Alpine are written to file.
+ """
+ alpine_version = "edge"
+ config = {
+ "apk_repos": {
+ "alpine_repo": {
+ "version": alpine_version,
+ "community_enabled": True,
+ "testing_enabled": True,
+ }
+ }
+ }
+
+ cc_apk_configure.handle(
+ self.name, config, self.cloud, self.log, self.args
+ )
+
+ expected_content = textwrap.dedent(
+ """\
+ #
+ # Created by cloud-init
+ #
+ # This file is written on first boot of an instance
+ #
+
+ {0}/{1}/main
+ {0}/{1}/community
+ {0}/{1}/testing
+
+ """.format(
+ DEFAULT_MIRROR_URL, alpine_version
+ )
+ )
+
+ self.assertEqual(expected_content, util.load_file(REPO_FILE))
+
+ def test_main_community_testing_local_repos(self):
+ """
+ Test when details of main, community, testing and
+ local repos are written to file.
+ """
+ alpine_version = "v3.12"
+ local_repo_url = "http://some.mirror/whereever"
+ config = {
+ "apk_repos": {
+ "alpine_repo": {
+ "version": alpine_version,
+ "community_enabled": True,
+ "testing_enabled": True,
+ },
+ "local_repo_base_url": local_repo_url,
+ }
+ }
+
+ cc_apk_configure.handle(
+ self.name, config, self.cloud, self.log, self.args
+ )
+
+ expected_content = textwrap.dedent(
+ """\
+ #
+ # Created by cloud-init
+ #
+ # This file is written on first boot of an instance
+ #
+
+ {0}/{1}/main
+ {0}/{1}/community
+ #
+ # Testing - using with non-Edge installation may cause problems!
+ #
+ {0}/edge/testing
+
+ #
+ # Local repo
+ #
+ {2}/{1}
+
+ """.format(
+ DEFAULT_MIRROR_URL, alpine_version, local_repo_url
+ )
+ )
+
+ self.assertEqual(expected_content, util.load_file(REPO_FILE))
+
+ def test_edge_main_community_testing_local_repos(self):
+ """
+ Test when details of main, community, testing and local repos
+ for Edge version of Alpine are written to file.
+ """
+ alpine_version = "edge"
+ local_repo_url = "http://some.mirror/whereever"
+ config = {
+ "apk_repos": {
+ "alpine_repo": {
+ "version": alpine_version,
+ "community_enabled": True,
+ "testing_enabled": True,
+ },
+ "local_repo_base_url": local_repo_url,
+ }
+ }
+
+ cc_apk_configure.handle(
+ self.name, config, self.cloud, self.log, self.args
+ )
+
+ expected_content = textwrap.dedent(
+ """\
+ #
+ # Created by cloud-init
+ #
+ # This file is written on first boot of an instance
+ #
+
+ {0}/{1}/main
+ {0}/{1}/community
+ {0}/edge/testing
+
+ #
+ # Local repo
+ #
+ {2}/{1}
+
+ """.format(
+ DEFAULT_MIRROR_URL, alpine_version, local_repo_url
+ )
+ )
+
+ self.assertEqual(expected_content, util.load_file(REPO_FILE))
+
+
+class TestApkConfigureSchema:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ (
+ # Valid schemas
+ ({"apk_repos": {"preserve_repositories": True}}, None),
+ ({"apk_repos": {"alpine_repo": None}}, None),
+ ({"apk_repos": {"alpine_repo": {"version": "v3.21"}}}, None),
+ (
+ {
+ "apk_repos": {
+ "alpine_repo": {
+ "base_url": "http://yep",
+ "community_enabled": True,
+ "testing_enabled": True,
+ "version": "v3.21",
+ }
+ }
+ },
+ None,
+ ),
+ ({"apk_repos": {"local_repo_base_url": "http://some"}}, None),
+ # Invalid schemas
+ (
+ {"apk_repos": {"alpine_repo": {"version": False}}},
+ "apk_repos.alpine_repo.version: False is not of type"
+ " 'string'",
+ ),
+ (
+ {
+ "apk_repos": {
+ "alpine_repo": {"version": "v3.12", "bogus": 1}
+ }
+ },
+ re.escape(
+ "apk_repos.alpine_repo: Additional properties are not"
+ " allowed ('bogus' was unexpected)"
+ ),
+ ),
+ (
+ {"apk_repos": {"alpine_repo": {}}},
+ "apk_repos.alpine_repo: 'version' is a required property,"
+ " apk_repos.alpine_repo: {} does not have enough properties",
+ ),
+ (
+ {"apk_repos": {"alpine_repo": True}},
+ "apk_repos.alpine_repo: True is not of type 'object', 'null'",
+ ),
+ (
+ {"apk_repos": {"preserve_repositories": "wrongtype"}},
+ "apk_repos.preserve_repositories: 'wrongtype' is not of type"
+ " 'boolean'",
+ ),
+ (
+ {"apk_repos": {}},
+ "apk_repos: {} does not have enough properties",
+ ),
+ (
+ {"apk_repos": {"local_repo_base_url": None}},
+ "apk_repos.local_repo_base_url: None is not of type 'string'",
+ ),
+ ),
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ schema = get_schema()
+ if error_msg is None:
+ validate_cloudconfig_schema(config, schema, strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, schema, strict=True)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_apt_configure.py b/tests/unittests/config/test_cc_apt_configure.py
new file mode 100644
index 00000000..bd1bb963
--- /dev/null
+++ b/tests/unittests/config/test_cc_apt_configure.py
@@ -0,0 +1,202 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+""" Tests for cc_apt_configure module """
+
+import re
+
+import pytest
+
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import skipUnlessJsonSchema
+
+
+class TestAPTConfigureSchema:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ (
+ # Supplement valid schemas from examples tested in test_schema
+ ({"apt": {"preserve_sources_list": True}}, None),
+ # Invalid schemas
+ (
+ {"apt": "nonobject"},
+ "apt: 'nonobject' is not of type 'object",
+ ),
+ (
+ {"apt": {"boguskey": True}},
+ re.escape(
+ "apt: Additional properties are not allowed"
+ " ('boguskey' was unexpected)"
+ ),
+ ),
+ ({"apt": {}}, "apt: {} does not have enough properties"),
+ (
+ {"apt": {"preserve_sources_list": 1}},
+ "apt.preserve_sources_list: 1 is not of type 'boolean'",
+ ),
+ (
+ {"apt": {"disable_suites": 1}},
+ "apt.disable_suites: 1 is not of type 'array'",
+ ),
+ (
+ {"apt": {"disable_suites": []}},
+ re.escape("apt.disable_suites: [] is too short"),
+ ),
+ (
+ {"apt": {"disable_suites": [1]}},
+ "apt.disable_suites.0: 1 is not of type 'string'",
+ ),
+ (
+ {"apt": {"disable_suites": ["a", "a"]}},
+ re.escape(
+ "apt.disable_suites: ['a', 'a'] has non-unique elements"
+ ),
+ ),
+ # All apt: primary tests are applicable to the "security" key too.
+ # Those apt:security tests are exercised in the unittest below
+ (
+ {"apt": {"primary": "nonlist"}},
+ "apt.primary: 'nonlist' is not of type 'array'",
+ ),
+ (
+ {"apt": {"primary": []}},
+ re.escape("apt.primary: [] is too short"),
+ ),
+ (
+ {"apt": {"primary": ["nonobj"]}},
+ "apt.primary.0: 'nonobj' is not of type 'object'",
+ ),
+ (
+ {"apt": {"primary": [{}]}},
+ "apt.primary.0: 'arches' is a required property",
+ ),
+ (
+ {"apt": {"primary": [{"boguskey": True}]}},
+ re.escape(
+ "apt.primary.0: Additional properties are not allowed"
+ " ('boguskey' was unexpected)"
+ ),
+ ),
+ (
+ {"apt": {"primary": [{"arches": True}]}},
+ "apt.primary.0.arches: True is not of type 'array'",
+ ),
+ (
+ {"apt": {"primary": [{"uri": True}]}},
+ "apt.primary.0.uri: True is not of type 'string'",
+ ),
+ (
+ {
+ "apt": {
+ "primary": [
+ {"arches": ["amd64"], "search": "non-array"}
+ ]
+ }
+ },
+ "apt.primary.0.search: 'non-array' is not of type 'array'",
+ ),
+ (
+ {"apt": {"primary": [{"arches": ["amd64"], "search": []}]}},
+ re.escape("apt.primary.0.search: [] is too short"),
+ ),
+ (
+ {
+ "apt": {
+ "primary": [{"arches": ["amd64"], "search_dns": "a"}]
+ }
+ },
+ "apt.primary.0.search_dns: 'a' is not of type 'boolean'",
+ ),
+ (
+ {"apt": {"primary": [{"arches": ["amd64"], "keyid": 1}]}},
+ "apt.primary.0.keyid: 1 is not of type 'string'",
+ ),
+ (
+ {"apt": {"primary": [{"arches": ["amd64"], "key": 1}]}},
+ "apt.primary.0.key: 1 is not of type 'string'",
+ ),
+ (
+ {"apt": {"primary": [{"arches": ["amd64"], "keyserver": 1}]}},
+ "apt.primary.0.keyserver: 1 is not of type 'string'",
+ ),
+ (
+ {"apt": {"add_apt_repo_match": True}},
+ "apt.add_apt_repo_match: True is not of type 'string'",
+ ),
+ (
+ {"apt": {"debconf_selections": True}},
+ "apt.debconf_selections: True is not of type 'object'",
+ ),
+ (
+ {"apt": {"debconf_selections": {}}},
+ "apt.debconf_selections: {} does not have enough properties",
+ ),
+ (
+ {"apt": {"sources_list": True}},
+ "apt.sources_list: True is not of type 'string'",
+ ),
+ (
+ {"apt": {"conf": True}},
+ "apt.conf: True is not of type 'string'",
+ ),
+ (
+ {"apt": {"http_proxy": True}},
+ "apt.http_proxy: True is not of type 'string'",
+ ),
+ (
+ {"apt": {"https_proxy": True}},
+ "apt.https_proxy: True is not of type 'string'",
+ ),
+ (
+ {"apt": {"proxy": True}},
+ "apt.proxy: True is not of type 'string'",
+ ),
+ (
+ {"apt": {"ftp_proxy": True}},
+ "apt.ftp_proxy: True is not of type 'string'",
+ ),
+ (
+ {"apt": {"sources": True}},
+ "apt.sources: True is not of type 'object'",
+ ),
+ (
+ {"apt": {"sources": {"opaquekey": True}}},
+ "apt.sources.opaquekey: True is not of type 'object'",
+ ),
+ (
+ {"apt": {"sources": {"opaquekey": {}}}},
+ "apt.sources.opaquekey: {} does not have enough properties",
+ ),
+ (
+ {"apt": {"sources": {"opaquekey": {"boguskey": True}}}},
+ re.escape(
+ "apt.sources.opaquekey: Additional properties are not"
+ " allowed ('boguskey' was unexpected)"
+ ),
+ ),
+ ),
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ schema = get_schema()
+ if error_msg is None:
+ validate_cloudconfig_schema(config, schema, strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, schema, strict=True)
+ # Note apt['primary'] and apt['security'] have the same definition
+ # Avoid test setup duplicates by running same test using 'security'
+ if isinstance(config.get("apt"), dict) and config["apt"].get(
+ "primary"
+ ):
+ # To exercise security schema, rename test key from primary
+ config["apt"]["security"] = config["apt"].pop("primary")
+ error_msg = error_msg.replace("primary", "security")
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, schema, strict=True)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_apt_pipelining.py b/tests/unittests/config/test_cc_apt_pipelining.py
new file mode 100644
index 00000000..0f72d32b
--- /dev/null
+++ b/tests/unittests/config/test_cc_apt_pipelining.py
@@ -0,0 +1,65 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Tests cc_apt_pipelining handler"""
+
+import pytest
+
+import cloudinit.config.cc_apt_pipelining as cc_apt_pipelining
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import mock, skipUnlessJsonSchema
+
+
+class TestAptPipelining:
+ @mock.patch("cloudinit.config.cc_apt_pipelining.util.write_file")
+ def test_not_disabled_by_default(self, m_write_file):
+ """ensure that default behaviour is to not disable pipelining"""
+ cc_apt_pipelining.handle("foo", {}, None, mock.MagicMock(), None)
+ assert 0 == m_write_file.call_count
+
+ @mock.patch("cloudinit.config.cc_apt_pipelining.util.write_file")
+ def test_false_disables_pipelining(self, m_write_file):
+ """ensure that pipelining can be disabled with correct config"""
+ cc_apt_pipelining.handle(
+ "foo", {"apt_pipelining": "false"}, None, mock.MagicMock(), None
+ )
+ assert 1 == m_write_file.call_count
+ args, _ = m_write_file.call_args
+ assert cc_apt_pipelining.DEFAULT_FILE == args[0]
+ assert 'Pipeline-Depth "0"' in args[1]
+
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ (
+ # Valid schemas
+ ({}, None),
+ ({"apt_pipelining": 1}, None),
+ ({"apt_pipelining": True}, None),
+ ({"apt_pipelining": False}, None),
+ ({"apt_pipelining": "none"}, None),
+ ({"apt_pipelining": "unchanged"}, None),
+ ({"apt_pipelining": "os"}, None),
+ # Invalid schemas
+ (
+ {"apt_pipelining": "bogus"},
+ "Cloud config schema errors: apt_pipelining: 'bogus' is not"
+ " valid under any of the given schema",
+ ),
+ ),
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ """Assert expected schema validation and error messages."""
+ # New-style schema $defs exist in config/cloud-init-schema*.json
+ schema = get_schema()
+ if error_msg is None:
+ validate_cloudconfig_schema(config, schema, strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, schema, strict=True)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_bootcmd.py b/tests/unittests/config/test_cc_bootcmd.py
new file mode 100644
index 00000000..34b16b85
--- /dev/null
+++ b/tests/unittests/config/test_cc_bootcmd.py
@@ -0,0 +1,165 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+import logging
+import re
+import tempfile
+
+import pytest
+
+from cloudinit import subp, util
+from cloudinit.config.cc_bootcmd import handle
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import CiTestCase, mock, skipUnlessJsonSchema
+from tests.unittests.util import get_cloud
+
+LOG = logging.getLogger(__name__)
+
+
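+# Minimal stand-in for temp_utils.ExtendedTemporaryFile: the file is
+# created with delete=False and removed explicitly on exit.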
+class FakeExtendedTempFile(object):
+ def __init__(self, suffix):
+ self.suffix = suffix
+ self.handle = tempfile.NamedTemporaryFile(
+ prefix="ci-%s." % self.__class__.__name__, delete=False
+ )
+
+ def __enter__(self):
+ return self.handle
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self.handle.close()
+ util.del_file(self.handle.name)
+
+
+class TestBootcmd(CiTestCase):
+
+ with_logs = True
+
+ _etmpfile_path = (
+ "cloudinit.config.cc_bootcmd.temp_utils.ExtendedTemporaryFile"
+ )
+
+ def setUp(self):
+ super(TestBootcmd, self).setUp()
+ self.subp = subp.subp
+ self.new_root = self.tmp_dir()
+
+ def test_handler_skip_if_no_bootcmd(self):
+ """When the provided config doesn't contain bootcmd, skip it."""
+ cfg = {}
+ mycloud = get_cloud()
+ handle("notimportant", cfg, mycloud, LOG, None)
+ self.assertIn(
+ "Skipping module named notimportant, no 'bootcmd' key",
+ self.logs.getvalue(),
+ )
+
+ def test_handler_invalid_command_set(self):
+ """Commands which can't be converted to shell will raise errors."""
+ invalid_config = {"bootcmd": 1}
+ cc = get_cloud()
+ with self.assertRaises(TypeError) as context_manager:
+ handle("cc_bootcmd", invalid_config, cc, LOG, [])
+ self.assertIn("Failed to shellify bootcmd", self.logs.getvalue())
+ self.assertEqual(
+ "Input to shellify was type 'int'. Expected list or tuple.",
+ str(context_manager.exception),
+ )
+
+ invalid_config = {
+ "bootcmd": ["ls /", 20, ["wget", "http://stuff/blah"], {"a": "n"}]
+ }
+ cc = get_cloud()
+ with self.assertRaises(TypeError) as context_manager:
+ handle("cc_bootcmd", invalid_config, cc, LOG, [])
+ logs = self.logs.getvalue()
+ self.assertIn("Failed to shellify", logs)
+ self.assertEqual(
+ "Unable to shellify type 'int'. Expected list, string, tuple. "
+ "Got: 20",
+ str(context_manager.exception),
+ )
+
+ def test_handler_creates_and_runs_bootcmd_script_with_instance_id(self):
+ """Valid schema runs a bootcmd script with INSTANCE_ID in the env."""
+ cc = get_cloud()
+ out_file = self.tmp_path("bootcmd.out", self.new_root)
+ my_id = "b6ea0f59-e27d-49c6-9f87-79f19765a425"
+ valid_config = {
+ "bootcmd": ["echo {0} $INSTANCE_ID > {1}".format(my_id, out_file)]
+ }
+
+ with mock.patch(self._etmpfile_path, FakeExtendedTempFile):
+ with self.allow_subp(["/bin/sh"]):
+ handle("cc_bootcmd", valid_config, cc, LOG, [])
+ self.assertEqual(
+ my_id + " iid-datasource-none\n", util.load_file(out_file)
+ )
+
+ def test_handler_runs_bootcmd_script_with_error(self):
+ """When a valid script generates an error, that error is raised."""
+ cc = get_cloud()
+ valid_config = {"bootcmd": ["exit 1"]} # Script with error
+
+ with mock.patch(self._etmpfile_path, FakeExtendedTempFile):
+ with self.allow_subp(["/bin/sh"]):
+ with self.assertRaises(subp.ProcessExecutionError) as ctxt:
+ handle("does-not-matter", valid_config, cc, LOG, [])
+ self.assertIn(
+ "Unexpected error while running command.\nCommand: ['/bin/sh',",
+ str(ctxt.exception),
+ )
+ self.assertIn(
+ "Failed to run bootcmd module does-not-matter",
+ self.logs.getvalue(),
+ )
+
+
+@skipUnlessJsonSchema()
+class TestBootCMDSchema:
+ """Directly test schema rather than through handle."""
+
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ (
+ # Valid schemas tested by meta.examples in test_schema
+ # Invalid schemas
+ (
+ {"bootcmd": 1},
+ "Cloud config schema errors: bootcmd: 1 is not of type"
+ " 'array'",
+ ),
+ ({"bootcmd": []}, re.escape("bootcmd: [] is too short")),
+ (
+ {"bootcmd": []},
+ re.escape(
+ "Cloud config schema errors: bootcmd: [] is too short"
+ ),
+ ),
+ (
+ {
+ "bootcmd": [
+ "ls /",
+ 20,
+ ["wget", "http://stuff/blah"],
+ {"a": "n"},
+ ]
+ },
+ "Cloud config schema errors: bootcmd.1: 20 is not valid under"
+ " any of the given schemas, bootcmd.3: {'a': 'n'} is not"
+ " valid under any of the given schemas",
+ ),
+ ),
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ """Assert expected schema validation and error messages."""
+ # New-style schema $defs exist in config/cloud-init-schema*.json
+ schema = get_schema()
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, schema, strict=True)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_byobu.py b/tests/unittests/config/test_cc_byobu.py
new file mode 100644
index 00000000..fbdf3403
--- /dev/null
+++ b/tests/unittests/config/test_cc_byobu.py
@@ -0,0 +1,51 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import re
+
+import pytest
+
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import skipUnlessJsonSchema
+
+
+class TestByobuSchema:
+ """Directly test schema rather than through handle."""
+
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ (
+ # Supplement valid schemas tested by meta.examples in test_schema
+ ({"byobu_by_default": "enable"}, None),
+ # Invalid schemas
+ (
+ {"byobu_by_default": 1},
+ "byobu_by_default: 1 is not of type 'string'",
+ ),
+ (
+ {"byobu_by_default": "bogusenum"},
+ re.escape(
+ "byobu_by_default: 'bogusenum' is not one of"
+ " ['enable-system', 'enable-user', 'disable-system',"
+ " 'disable-user', 'enable', 'disable',"
+ " 'user', 'system']"
+ ),
+ ),
+ ),
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ """Assert expected schema validation and error messages."""
+ # New-style schema $defs exist in config/cloud-init-schema*.json
+ schema = get_schema()
+ if error_msg is None:
+ validate_cloudconfig_schema(config, schema, strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, schema, strict=True)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_ca_certs.py b/tests/unittests/config/test_cc_ca_certs.py
new file mode 100644
index 00000000..39614635
--- /dev/null
+++ b/tests/unittests/config/test_cc_ca_certs.py
@@ -0,0 +1,507 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+import logging
+import re
+import shutil
+import tempfile
+import unittest
+from contextlib import ExitStack
+from unittest import mock
+
+import pytest
+
+from cloudinit import distros, helpers, subp, util
+from cloudinit.config import cc_ca_certs
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import TestCase, skipUnlessJsonSchema
+from tests.unittests.util import get_cloud
+
+
+class TestNoConfig(unittest.TestCase):
+ def setUp(self):
+ super(TestNoConfig, self).setUp()
+ self.name = "ca-certs"
+ self.cloud_init = None
+ self.log = logging.getLogger("TestNoConfig")
+ self.args = []
+
+ def test_no_config(self):
+ """
+ Test that nothing is done if no ca-certs configuration is provided.
+ """
+ config = util.get_builtin_cfg()
+ with ExitStack() as mocks:
+ util_mock = mocks.enter_context(
+ mock.patch.object(util, "write_file")
+ )
+ certs_mock = mocks.enter_context(
+ mock.patch.object(cc_ca_certs, "update_ca_certs")
+ )
+
+ cc_ca_certs.handle(
+ self.name, config, self.cloud_init, self.log, self.args
+ )
+
+ self.assertEqual(util_mock.call_count, 0)
+ self.assertEqual(certs_mock.call_count, 0)
+
+
+class TestConfig(TestCase):
+ def setUp(self):
+ super(TestConfig, self).setUp()
+ self.name = "ca-certs"
+ self.paths = None
+ self.log = logging.getLogger("TestNoConfig")
+ self.args = []
+
+ def _fetch_distro(self, kind):
+ cls = distros.fetch(kind)
+ paths = helpers.Paths({})
+ return cls(kind, {}, paths)
+
+ def _mock_init(self):
+ self.mocks = ExitStack()
+ self.addCleanup(self.mocks.close)
+
+ # Mock out the functions that actually modify the system
+ self.mock_add = self.mocks.enter_context(
+ mock.patch.object(cc_ca_certs, "add_ca_certs")
+ )
+ self.mock_update = self.mocks.enter_context(
+ mock.patch.object(cc_ca_certs, "update_ca_certs")
+ )
+ self.mock_remove = self.mocks.enter_context(
+ mock.patch.object(cc_ca_certs, "remove_default_ca_certs")
+ )
+
+ def test_no_trusted_list(self):
+ """
+ Test that no certificates are written if the 'trusted' key is not
+ present.
+ """
+ config = {"ca-certs": {}}
+
+ for distro_name in cc_ca_certs.distros:
+ self._mock_init()
+ cloud = get_cloud(distro_name)
+ cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
+
+ self.assertEqual(self.mock_add.call_count, 0)
+ self.assertEqual(self.mock_update.call_count, 1)
+ self.assertEqual(self.mock_remove.call_count, 0)
+
+ def test_empty_trusted_list(self):
+ """Test that no certificate are written if 'trusted' list is empty."""
+ config = {"ca-certs": {"trusted": []}}
+
+ for distro_name in cc_ca_certs.distros:
+ self._mock_init()
+ cloud = get_cloud(distro_name)
+ cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
+
+ self.assertEqual(self.mock_add.call_count, 0)
+ self.assertEqual(self.mock_update.call_count, 1)
+ self.assertEqual(self.mock_remove.call_count, 0)
+
+ def test_single_trusted(self):
+ """Test that a single cert gets passed to add_ca_certs."""
+ config = {"ca-certs": {"trusted": ["CERT1"]}}
+
+ for distro_name in cc_ca_certs.distros:
+ self._mock_init()
+ cloud = get_cloud(distro_name)
+ conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
+ cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
+
+ self.mock_add.assert_called_once_with(conf, ["CERT1"])
+ self.assertEqual(self.mock_update.call_count, 1)
+ self.assertEqual(self.mock_remove.call_count, 0)
+
+ def test_multiple_trusted(self):
+ """Test that multiple certs get passed to add_ca_certs."""
+ config = {"ca-certs": {"trusted": ["CERT1", "CERT2"]}}
+
+ for distro_name in cc_ca_certs.distros:
+ self._mock_init()
+ cloud = get_cloud(distro_name)
+ conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
+ cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
+
+ self.mock_add.assert_called_once_with(conf, ["CERT1", "CERT2"])
+ self.assertEqual(self.mock_update.call_count, 1)
+ self.assertEqual(self.mock_remove.call_count, 0)
+
+ def test_remove_default_ca_certs(self):
+ """Test remove_defaults works as expected."""
+ config = {"ca_certs": {"remove_defaults": True}}
+
+ for distro_name in cc_ca_certs.distros:
+ self._mock_init()
+ cloud = get_cloud(distro_name)
+ cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
+
+ self.assertEqual(self.mock_add.call_count, 0)
+ self.assertEqual(self.mock_update.call_count, 1)
+ self.assertEqual(self.mock_remove.call_count, 1)
+
+ def test_no_remove_defaults_if_false(self):
+ """Test remove_defaults is not called when config value is False."""
+ config = {"ca_certs": {"remove_defaults": False}}
+
+ for distro_name in cc_ca_certs.distros:
+ self._mock_init()
+ cloud = get_cloud(distro_name)
+ cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
+
+ self.assertEqual(self.mock_add.call_count, 0)
+ self.assertEqual(self.mock_update.call_count, 1)
+ self.assertEqual(self.mock_remove.call_count, 0)
+
+ def test_correct_order_for_remove_then_add(self):
+ """Test remove_defaults is not called when config value is False."""
+ config = {"ca_certs": {"remove_defaults": True, "trusted": ["CERT1"]}}
+
+ for distro_name in cc_ca_certs.distros:
+ self._mock_init()
+ cloud = get_cloud(distro_name)
+ conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
+ cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
+
+ self.mock_add.assert_called_once_with(conf, ["CERT1"])
+ self.assertEqual(self.mock_update.call_count, 1)
+ self.assertEqual(self.mock_remove.call_count, 1)
+
+
+class TestAddCaCerts(TestCase):
+ def setUp(self):
+ super(TestAddCaCerts, self).setUp()
+ tmpdir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, tmpdir)
+ self.paths = helpers.Paths(
+ {
+ "cloud_dir": tmpdir,
+ }
+ )
+ self.add_patch("cloudinit.config.cc_ca_certs.os.stat", "m_stat")
+
+ def _fetch_distro(self, kind):
+ cls = distros.fetch(kind)
+ paths = helpers.Paths({})
+ return cls(kind, {}, paths)
+
+ def test_no_certs_in_list(self):
+ """Test that no certificate are written if not provided."""
+ for distro_name in cc_ca_certs.distros:
+ conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
+ with mock.patch.object(util, "write_file") as mockobj:
+ cc_ca_certs.add_ca_certs(conf, [])
+ self.assertEqual(mockobj.call_count, 0)
+
+ def test_single_cert_trailing_cr(self):
+ """Test adding a single certificate to the trusted CAs
+ when existing ca-certificates has trailing newline"""
+ cert = "CERT1\nLINE2\nLINE3"
+
+ ca_certs_content = "line1\nline2\ncloud-init-ca-certs.crt\nline3\n"
+ expected = "line1\nline2\nline3\ncloud-init-ca-certs.crt\n"
+
+ self.m_stat.return_value.st_size = 1
+
+ for distro_name in cc_ca_certs.distros:
+ conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
+
+ with ExitStack() as mocks:
+ mock_write = mocks.enter_context(
+ mock.patch.object(util, "write_file")
+ )
+ mock_load = mocks.enter_context(
+ mock.patch.object(
+ util, "load_file", return_value=ca_certs_content
+ )
+ )
+
+ cc_ca_certs.add_ca_certs(conf, [cert])
+
+ mock_write.assert_has_calls(
+ [mock.call(conf["ca_cert_full_path"], cert, mode=0o644)]
+ )
+ if conf["ca_cert_config"] is not None:
+ mock_write.assert_has_calls(
+ [
+ mock.call(
+ conf["ca_cert_config"], expected, omode="wb"
+ )
+ ]
+ )
+ mock_load.assert_called_once_with(conf["ca_cert_config"])
+
+ def test_single_cert_no_trailing_cr(self):
+ """Test adding a single certificate to the trusted CAs
+ when existing ca-certificates has no trailing newline"""
+ cert = "CERT1\nLINE2\nLINE3"
+
+ ca_certs_content = "line1\nline2\nline3"
+
+ self.m_stat.return_value.st_size = 1
+
+ for distro_name in cc_ca_certs.distros:
+ conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
+
+ with ExitStack() as mocks:
+ mock_write = mocks.enter_context(
+ mock.patch.object(util, "write_file")
+ )
+ mock_load = mocks.enter_context(
+ mock.patch.object(
+ util, "load_file", return_value=ca_certs_content
+ )
+ )
+
+ cc_ca_certs.add_ca_certs(conf, [cert])
+
+ mock_write.assert_has_calls(
+ [mock.call(conf["ca_cert_full_path"], cert, mode=0o644)]
+ )
+ if conf["ca_cert_config"] is not None:
+ mock_write.assert_has_calls(
+ [
+ mock.call(
+ conf["ca_cert_config"],
+ "%s\n%s\n"
+ % (ca_certs_content, conf["ca_cert_filename"]),
+ omode="wb",
+ )
+ ]
+ )
+
+ mock_load.assert_called_once_with(conf["ca_cert_config"])
+
+ def test_single_cert_to_empty_existing_ca_file(self):
+ """Test adding a single certificate to the trusted CAs
+ when existing ca-certificates.conf is empty"""
+ cert = "CERT1\nLINE2\nLINE3"
+
+ expected = "cloud-init-ca-certs.crt\n"
+
+ self.m_stat.return_value.st_size = 0
+
+ for distro_name in cc_ca_certs.distros:
+ conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
+ with mock.patch.object(
+ util, "write_file", autospec=True
+ ) as m_write:
+
+ cc_ca_certs.add_ca_certs(conf, [cert])
+
+ m_write.assert_has_calls(
+ [mock.call(conf["ca_cert_full_path"], cert, mode=0o644)]
+ )
+ if conf["ca_cert_config"] is not None:
+ m_write.assert_has_calls(
+ [
+ mock.call(
+ conf["ca_cert_config"], expected, omode="wb"
+ )
+ ]
+ )
+
+ def test_multiple_certs(self):
+ """Test adding multiple certificates to the trusted CAs."""
+ certs = ["CERT1\nLINE2\nLINE3", "CERT2\nLINE2\nLINE3"]
+ expected_cert_file = "\n".join(certs)
+ ca_certs_content = "line1\nline2\nline3"
+
+ self.m_stat.return_value.st_size = 1
+
+ for distro_name in cc_ca_certs.distros:
+ conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
+
+ with ExitStack() as mocks:
+ mock_write = mocks.enter_context(
+ mock.patch.object(util, "write_file")
+ )
+ mock_load = mocks.enter_context(
+ mock.patch.object(
+ util, "load_file", return_value=ca_certs_content
+ )
+ )
+
+ cc_ca_certs.add_ca_certs(conf, certs)
+
+ mock_write.assert_has_calls(
+ [
+ mock.call(
+ conf["ca_cert_full_path"],
+ expected_cert_file,
+ mode=0o644,
+ )
+ ]
+ )
+ if conf["ca_cert_config"] is not None:
+ mock_write.assert_has_calls(
+ [
+ mock.call(
+ conf["ca_cert_config"],
+ "%s\n%s\n"
+ % (ca_certs_content, conf["ca_cert_filename"]),
+ omode="wb",
+ )
+ ]
+ )
+
+ mock_load.assert_called_once_with(conf["ca_cert_config"])
+
+
+class TestUpdateCaCerts(unittest.TestCase):
+ def test_commands(self):
+ for distro_name in cc_ca_certs.distros:
+ conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
+ with mock.patch.object(subp, "subp") as mockobj:
+ cc_ca_certs.update_ca_certs(conf)
+ mockobj.assert_called_once_with(
+ conf["ca_cert_update_cmd"], capture=False
+ )
+
+
+class TestRemoveDefaultCaCerts(TestCase):
+ def setUp(self):
+ super(TestRemoveDefaultCaCerts, self).setUp()
+ tmpdir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, tmpdir)
+ self.paths = helpers.Paths(
+ {
+ "cloud_dir": tmpdir,
+ }
+ )
+
+ def test_commands(self):
+ for distro_name in cc_ca_certs.distros:
+ conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
+
+ with ExitStack() as mocks:
+ mock_delete = mocks.enter_context(
+ mock.patch.object(util, "delete_dir_contents")
+ )
+ mock_write = mocks.enter_context(
+ mock.patch.object(util, "write_file")
+ )
+ mock_subp = mocks.enter_context(
+ mock.patch.object(subp, "subp")
+ )
+
+ cc_ca_certs.remove_default_ca_certs(distro_name, conf)
+
+ mock_delete.assert_has_calls(
+ [
+ mock.call(conf["ca_cert_path"]),
+ mock.call(conf["ca_cert_system_path"]),
+ ]
+ )
+
+ if conf["ca_cert_config"] is not None:
+ mock_write.assert_called_once_with(
+ conf["ca_cert_config"], "", mode=0o644
+ )
+
+ if distro_name in ["debian", "ubuntu"]:
+ mock_subp.assert_called_once_with(
+ ("debconf-set-selections", "-"),
+ "ca-certificates ca-certificates/trust_new_crts"
+ " select no",
+ )
+
+
+class TestCACertsSchema:
+ """Directly test schema rather than through handle."""
+
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ (
+ # Valid, yet deprecated schemas
+ ({"ca-certs": {"remove-defaults": True}}, None),
+ # Invalid schemas
+ (
+ {"ca_certs": 1},
+ "ca_certs: 1 is not of type 'object'",
+ ),
+ (
+ {"ca_certs": {}},
+ re.escape("ca_certs: {} does not have enough properties"),
+ ),
+ (
+ {"ca_certs": {"boguskey": 1}},
+ re.escape(
+ "ca_certs: Additional properties are not allowed"
+ " ('boguskey' was unexpected)"
+ ),
+ ),
+ (
+ {"ca_certs": {"remove_defaults": 1}},
+ "ca_certs.remove_defaults: 1 is not of type 'boolean'",
+ ),
+ (
+ {"ca_certs": {"trusted": [1]}},
+ "ca_certs.trusted.0: 1 is not of type 'string'",
+ ),
+ (
+ {"ca_certs": {"trusted": []}},
+ re.escape("ca_certs.trusted: [] is too short"),
+ ),
+ ),
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ """Assert expected schema validation and error messages."""
+ # New-style schema $defs exist in config/cloud-init-schema*.json
+ schema = get_schema()
+ if error_msg is None:
+ validate_cloudconfig_schema(config, schema, strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, schema, strict=True)
+
+ @mock.patch.object(cc_ca_certs, "update_ca_certs")
+ def test_deprecate_key_warnings(self, update_ca_certs, caplog):
+ """Assert warnings are logged for deprecated keys."""
+ log = logging.getLogger("CALogTest")
+ cloud = get_cloud("ubuntu")
+ cc_ca_certs.handle(
+ "IGNORE", {"ca-certs": {"remove-defaults": False}}, cloud, log, []
+ )
+ expected_warnings = [
+ "DEPRECATION: key 'ca-certs' is now deprecated. Use 'ca_certs'"
+ " instead.",
+ "DEPRECATION: key 'ca-certs.remove-defaults' is now deprecated."
+ " Use 'ca_certs.remove_defaults' instead.",
+ ]
+ for warning in expected_warnings:
+ assert warning in caplog.text
+ assert 1 == update_ca_certs.call_count
+
+ @mock.patch.object(cc_ca_certs, "update_ca_certs")
+ def test_duplicate_keys(self, update_ca_certs, caplog):
+ """Assert warnings are logged for deprecated keys."""
+ log = logging.getLogger("CALogTest")
+ cloud = get_cloud("ubuntu")
+ cc_ca_certs.handle(
+ "IGNORE",
+ {
+ "ca-certs": {"remove-defaults": True},
+ "ca_certs": {"remove_defaults": False},
+ },
+ cloud,
+ log,
+ [],
+ )
+ expected_warning = (
+ "Found both ca-certs (deprecated) and ca_certs config keys."
+ " Ignoring ca-certs."
+ )
+ assert expected_warning in caplog.text
+ assert 1 == update_ca_certs.call_count
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_chef.py b/tests/unittests/config/test_cc_chef.py
new file mode 100644
index 00000000..f86be293
--- /dev/null
+++ b/tests/unittests/config/test_cc_chef.py
@@ -0,0 +1,464 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import json
+import logging
+import os
+import re
+
+import httpretty
+import pytest
+
+from cloudinit import util
+from cloudinit.config import cc_chef
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import (
+ FilesystemMockingTestCase,
+ HttprettyTestCase,
+ cloud_init_project_dir,
+ mock,
+ skipIf,
+ skipUnlessJsonSchema,
+)
+from tests.unittests.util import get_cloud
+
+LOG = logging.getLogger(__name__)
+
+CLIENT_TEMPL = cloud_init_project_dir("templates/chef_client.rb.tmpl")
+
+# This is adjusted to use http because using https causes issues
+# in some openssl/httpretty combinations.
+# https://github.com/gabrielfalcao/HTTPretty/issues/242
+# We saw issues in openSUSE 42.3 with
+# httpretty=0.8.8-7.1 ndg-httpsclient=0.4.0-3.2 pyOpenSSL=16.0.0-4.1
+OMNIBUS_URL_HTTP = cc_chef.OMNIBUS_URL.replace("https:", "http:")
+
+
+class TestInstallChefOmnibus(HttprettyTestCase):
+ def setUp(self):
+ super(TestInstallChefOmnibus, self).setUp()
+ self.new_root = self.tmp_dir()
+
+ @mock.patch("cloudinit.config.cc_chef.OMNIBUS_URL", OMNIBUS_URL_HTTP)
+ def test_install_chef_from_omnibus_runs_chef_url_content(self):
+ """install_chef_from_omnibus calls subp_blob_in_tempfile."""
+ response = b'#!/bin/bash\necho "Hi Mom"'
+ httpretty.register_uri(
+ httpretty.GET, cc_chef.OMNIBUS_URL, body=response, status=200
+ )
+ ret = (None, None) # stdout, stderr but capture=False
+
+ with mock.patch(
+ "cloudinit.config.cc_chef.subp_blob_in_tempfile", return_value=ret
+ ) as m_subp_blob:
+ cc_chef.install_chef_from_omnibus()
+ # admittedly whitebox, but assuming subp_blob_in_tempfile works
+ # this should be fine.
+ self.assertEqual(
+ [
+ mock.call(
+ blob=response,
+ args=[],
+ basename="chef-omnibus-install",
+ capture=False,
+ )
+ ],
+ m_subp_blob.call_args_list,
+ )
+
+ @mock.patch("cloudinit.config.cc_chef.url_helper.readurl")
+ @mock.patch("cloudinit.config.cc_chef.subp_blob_in_tempfile")
+ def test_install_chef_from_omnibus_retries_url(self, m_subp_blob, m_rdurl):
+ """install_chef_from_omnibus retries OMNIBUS_URL upon failure."""
+
+ class FakeURLResponse(object):
+ contents = '#!/bin/bash\necho "Hi Mom" > {0}/chef.out'.format(
+ self.new_root
+ )
+
+ m_rdurl.return_value = FakeURLResponse()
+
+ cc_chef.install_chef_from_omnibus()
+ expected_kwargs = {
+ "retries": cc_chef.OMNIBUS_URL_RETRIES,
+ "url": cc_chef.OMNIBUS_URL,
+ }
+ self.assertCountEqual(expected_kwargs, m_rdurl.call_args_list[0][1])
+ cc_chef.install_chef_from_omnibus(retries=10)
+ expected_kwargs = {"retries": 10, "url": cc_chef.OMNIBUS_URL}
+ self.assertCountEqual(expected_kwargs, m_rdurl.call_args_list[1][1])
+ expected_subp_kwargs = {
+ "args": ["-v", "2.0"],
+ "basename": "chef-omnibus-install",
+ "blob": m_rdurl.return_value.contents,
+ "capture": False,
+ }
+ self.assertCountEqual(
+ expected_subp_kwargs, m_subp_blob.call_args_list[0][1]
+ )
+
+ @mock.patch("cloudinit.config.cc_chef.OMNIBUS_URL", OMNIBUS_URL_HTTP)
+ @mock.patch("cloudinit.config.cc_chef.subp_blob_in_tempfile")
+ def test_install_chef_from_omnibus_has_omnibus_version(self, m_subp_blob):
+ """install_chef_from_omnibus provides version arg to OMNIBUS_URL."""
+ chef_outfile = self.tmp_path("chef.out", self.new_root)
+ response = '#!/bin/bash\necho "Hi Mom" > {0}'.format(chef_outfile)
+ httpretty.register_uri(
+ httpretty.GET, cc_chef.OMNIBUS_URL, body=response
+ )
+ cc_chef.install_chef_from_omnibus(omnibus_version="2.0")
+
+ called_kwargs = m_subp_blob.call_args_list[0][1]
+ expected_kwargs = {
+ "args": ["-v", "2.0"],
+ "basename": "chef-omnibus-install",
+ "blob": response,
+ "capture": False,
+ }
+ self.assertCountEqual(expected_kwargs, called_kwargs)
+
+
+class TestChef(FilesystemMockingTestCase):
+ def setUp(self):
+ super(TestChef, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def test_no_config(self):
+ self.patchUtils(self.tmp)
+ self.patchOS(self.tmp)
+
+ cfg = {}
+ cc_chef.handle("chef", cfg, get_cloud(), LOG, [])
+ for d in cc_chef.CHEF_DIRS:
+ self.assertFalse(os.path.isdir(d))
+
+ @skipIf(
+ not os.path.isfile(CLIENT_TEMPL), CLIENT_TEMPL + " is not available"
+ )
+ def test_basic_config(self):
+ """
+ test basic config looks sane
+
+ # This should create a file of the format...
+ # Created by cloud-init v. 0.7.6 on Sat, 11 Oct 2014 23:57:21 +0000
+ chef_license "accept"
+ log_level :info
+ ssl_verify_mode :verify_none
+ log_location "/var/log/chef/client.log"
+ validation_client_name "bob"
+ validation_key "/etc/chef/validation.pem"
+ client_key "/etc/chef/client.pem"
+ chef_server_url "localhost"
+ environment "_default"
+ node_name "iid-datasource-none"
+ json_attribs "/etc/chef/firstboot.json"
+ file_cache_path "/var/cache/chef"
+ file_backup_path "/var/backups/chef"
+ pid_file "/var/run/chef/client.pid"
+ Chef::Log::Formatter.show_time = true
+ encrypted_data_bag_secret "/etc/chef/encrypted_data_bag_secret"
+ """
+ tpl_file = util.load_file(CLIENT_TEMPL)
+ self.patchUtils(self.tmp)
+ self.patchOS(self.tmp)
+
+ util.write_file("/etc/cloud/templates/chef_client.rb.tmpl", tpl_file)
+ cfg = {
+ "chef": {
+ "chef_license": "accept",
+ "server_url": "localhost",
+ "validation_name": "bob",
+ "validation_key": "/etc/chef/vkey.pem",
+ "validation_cert": "this is my cert",
+ "encrypted_data_bag_secret": (
+ "/etc/chef/encrypted_data_bag_secret"
+ ),
+ },
+ }
+ cc_chef.handle("chef", cfg, get_cloud(), LOG, [])
+ for d in cc_chef.CHEF_DIRS:
+ self.assertTrue(os.path.isdir(d))
+ c = util.load_file(cc_chef.CHEF_RB_PATH)
+
+ # the content of these keys is not expected to be rendered to tmpl
+ unrendered_keys = ("validation_cert",)
+ for k, v in cfg["chef"].items():
+ if k in unrendered_keys:
+ continue
+ self.assertIn(v, c)
+ for k, v in cc_chef.CHEF_RB_TPL_DEFAULTS.items():
+ if k in unrendered_keys:
+ continue
+ # the value from the cfg overrides that in the default
+ val = cfg["chef"].get(k, v)
+ if isinstance(val, str):
+ self.assertIn(val, c)
+ c = util.load_file(cc_chef.CHEF_FB_PATH)
+ self.assertEqual({}, json.loads(c))
+
+ def test_firstboot_json(self):
+ self.patchUtils(self.tmp)
+ self.patchOS(self.tmp)
+
+ cfg = {
+ "chef": {
+ "server_url": "localhost",
+ "validation_name": "bob",
+ "run_list": ["a", "b", "c"],
+ "initial_attributes": {
+ "c": "d",
+ },
+ },
+ }
+ cc_chef.handle("chef", cfg, get_cloud(), LOG, [])
+ c = util.load_file(cc_chef.CHEF_FB_PATH)
+ self.assertEqual(
+ {
+ "run_list": ["a", "b", "c"],
+ "c": "d",
+ },
+ json.loads(c),
+ )
+
+ @skipIf(
+ not os.path.isfile(CLIENT_TEMPL), CLIENT_TEMPL + " is not available"
+ )
+ def test_template_deletes(self):
+ tpl_file = util.load_file(CLIENT_TEMPL)
+ self.patchUtils(self.tmp)
+ self.patchOS(self.tmp)
+
+ util.write_file("/etc/cloud/templates/chef_client.rb.tmpl", tpl_file)
+ cfg = {
+ "chef": {
+ "server_url": "localhost",
+ "validation_name": "bob",
+ "json_attribs": None,
+ "show_time": None,
+ },
+ }
+ cc_chef.handle("chef", cfg, get_cloud(), LOG, [])
+ c = util.load_file(cc_chef.CHEF_RB_PATH)
+ self.assertNotIn("json_attribs", c)
+ self.assertNotIn("Formatter.show_time", c)
+
+ @skipIf(
+ not os.path.isfile(CLIENT_TEMPL), CLIENT_TEMPL + " is not available"
+ )
+ def test_validation_cert_and_validation_key(self):
+ # test validation_cert content is written to validation_key path
+ tpl_file = util.load_file(CLIENT_TEMPL)
+ self.patchUtils(self.tmp)
+ self.patchOS(self.tmp)
+
+ util.write_file("/etc/cloud/templates/chef_client.rb.tmpl", tpl_file)
+ v_path = "/etc/chef/vkey.pem"
+ v_cert = "this is my cert"
+ cfg = {
+ "chef": {
+ "server_url": "localhost",
+ "validation_name": "bob",
+ "validation_key": v_path,
+ "validation_cert": v_cert,
+ },
+ }
+ cc_chef.handle("chef", cfg, get_cloud(), LOG, [])
+ content = util.load_file(cc_chef.CHEF_RB_PATH)
+ self.assertIn(v_path, content)
+ util.load_file(v_path)
+ self.assertEqual(v_cert, util.load_file(v_path))
+
+ def test_validation_cert_with_system(self):
+ # test validation_cert content is not written over system file
+ tpl_file = util.load_file(CLIENT_TEMPL)
+ self.patchUtils(self.tmp)
+ self.patchOS(self.tmp)
+
+ v_path = "/etc/chef/vkey.pem"
+ v_cert = "system"
+ expected_cert = "this is the system file certificate"
+ cfg = {
+ "chef": {
+ "server_url": "localhost",
+ "validation_name": "bob",
+ "validation_key": v_path,
+ "validation_cert": v_cert,
+ },
+ }
+ util.write_file("/etc/cloud/templates/chef_client.rb.tmpl", tpl_file)
+ util.write_file(v_path, expected_cert)
+ cc_chef.handle("chef", cfg, get_cloud(), LOG, [])
+ content = util.load_file(cc_chef.CHEF_RB_PATH)
+ self.assertIn(v_path, content)
+ util.load_file(v_path)
+ self.assertEqual(expected_cert, util.load_file(v_path))
+
+
+@skipUnlessJsonSchema()
+class TestChefSchema:
+ """Directly test schema rather than through handle."""
+
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ (
+ # Valid schemas tested by meta.examples in test_schema
+ # Invalid schemas
+ (
+ {"chef": 1},
+ "chef: 1 is not of type 'object'",
+ ),
+ (
+ {"chef": {}},
+ re.escape(" chef: {} does not have enough properties"),
+ ),
+ (
+ {"chef": {"boguskey": True}},
+ re.escape(
+ "chef: Additional properties are not allowed"
+ " ('boguskey' was unexpected)"
+ ),
+ ),
+ (
+ {"chef": {"directories": 1}},
+ "chef.directories: 1 is not of type 'array'",
+ ),
+ (
+ {"chef": {"directories": []}},
+ re.escape("chef.directories: [] is too short"),
+ ),
+ (
+ {"chef": {"directories": [1]}},
+ "chef.directories.0: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"directories": ["a", "a"]}},
+ re.escape(
+ "chef.directories: ['a', 'a'] has non-unique elements"
+ ),
+ ),
+ (
+ {"chef": {"validation_cert": 1}},
+ "chef.validation_cert: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"validation_key": 1}},
+ "chef.validation_key: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"firstboot_path": 1}},
+ "chef.firstboot_path: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"client_key": 1}},
+ "chef.client_key: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"encrypted_data_bag_secret": 1}},
+ "chef.encrypted_data_bag_secret: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"environment": 1}},
+ "chef.environment: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"file_backup_path": 1}},
+ "chef.file_backup_path: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"file_cache_path": 1}},
+ "chef.file_cache_path: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"json_attribs": 1}},
+ "chef.json_attribs: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"log_level": 1}},
+ "chef.log_level: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"log_location": 1}},
+ "chef.log_location: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"node_name": 1}},
+ "chef.node_name: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"omnibus_url": 1}},
+ "chef.omnibus_url: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"omnibus_url_retries": "one"}},
+ "chef.omnibus_url_retries: 'one' is not of type 'integer'",
+ ),
+ (
+ {"chef": {"omnibus_version": 1}},
+ "chef.omnibus_version: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"pid_file": 1}},
+ "chef.pid_file: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"server_url": 1}},
+ "chef.server_url: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"show_time": 1}},
+ "chef.show_time: 1 is not of type 'boolean'",
+ ),
+ (
+ {"chef": {"ssl_verify_mode": 1}},
+ "chef.ssl_verify_mode: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"validation_name": 1}},
+ "chef.validation_name: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"force_install": 1}},
+ "chef.force_install: 1 is not of type 'boolean'",
+ ),
+ (
+ {"chef": {"initial_attributes": 1}},
+ "chef.initial_attributes: 1 is not of type 'object'",
+ ),
+ (
+ {"chef": {"install_type": 1}},
+ "chef.install_type: 1 is not of type 'string'",
+ ),
+ (
+ {"chef": {"install_type": "bogusenum"}},
+ re.escape(
+ "chef.install_type: 'bogusenum' is not one of"
+ " ['packages', 'gems', 'omnibus']"
+ ),
+ ),
+ (
+ {"chef": {"run_list": 1}},
+ "chef.run_list: 1 is not of type 'array'",
+ ),
+ (
+ {"chef": {"chef_license": 1}},
+ "chef.chef_license: 1 is not of type 'string'",
+ ),
+ ),
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ """Assert expected schema validation and error messages."""
+ # New-style schema $defs exist in config/cloud-init-schema*.json
+ schema = get_schema()
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, schema, strict=True)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_debug.py b/tests/unittests/config/test_cc_debug.py
new file mode 100644
index 00000000..fc8d43dc
--- /dev/null
+++ b/tests/unittests/config/test_cc_debug.py
@@ -0,0 +1,112 @@
+# Copyright (C) 2014 Yahoo! Inc.
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+import logging
+import re
+import shutil
+import tempfile
+
+import pytest
+
+from cloudinit import util
+from cloudinit.config import cc_debug
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import (
+ FilesystemMockingTestCase,
+ mock,
+ skipUnlessJsonSchema,
+)
+from tests.unittests.util import get_cloud
+
+LOG = logging.getLogger(__name__)
+
+
+@mock.patch("cloudinit.distros.debian.read_system_locale")
+class TestDebug(FilesystemMockingTestCase):
+ def setUp(self):
+ super(TestDebug, self).setUp()
+ self.new_root = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, self.new_root)
+ self.patchUtils(self.new_root)
+
+ def test_debug_write(self, m_locale):
+ m_locale.return_value = "en_US.UTF-8"
+ cfg = {
+ "abc": "123",
+ "c": "\u20a0",
+ "debug": {
+ "verbose": True,
+ # Does not actually write here due to mocking...
+ "output": "/var/log/cloud-init-debug.log",
+ },
+ }
+ cc = get_cloud()
+ cc_debug.handle("cc_debug", cfg, cc, LOG, [])
+ contents = util.load_file("/var/log/cloud-init-debug.log")
+ # Some basic sanity tests...
+ self.assertNotEqual(0, len(contents))
+ for k in cfg.keys():
+ self.assertIn(k, contents)
+
+ def test_debug_no_write(self, m_locale):
+ m_locale.return_value = "en_US.UTF-8"
+ cfg = {
+ "abc": "123",
+ "debug": {
+ "verbose": False,
+ # Does not actually write here due to mocking...
+ "output": "/var/log/cloud-init-debug.log",
+ },
+ }
+ cc = get_cloud()
+ cc_debug.handle("cc_debug", cfg, cc, LOG, [])
+ self.assertRaises(
+ IOError, util.load_file, "/var/log/cloud-init-debug.log"
+ )
+
+
+@skipUnlessJsonSchema()
+class TestDebugSchema:
+ """Directly test schema rather than through handle."""
+
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ (
+ # Valid schemas tested by meta.examples in test_schema
+ # Invalid schemas
+ ({"debug": 1}, "debug: 1 is not of type 'object'"),
+ (
+ {"debug": {}},
+ re.escape("debug: {} does not have enough properties"),
+ ),
+ (
+ {"debug": {"boguskey": True}},
+ re.escape(
+ "Additional properties are not allowed ('boguskey' was"
+ " unexpected)"
+ ),
+ ),
+ (
+ {"debug": {"verbose": 1}},
+ "debug.verbose: 1 is not of type 'boolean'",
+ ),
+ (
+ {"debug": {"output": 1}},
+ "debug.output: 1 is not of type 'string'",
+ ),
+ ),
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ """Assert expected schema validation and error messages."""
+ # New-style schema $defs exist in config/cloud-init-schema*.json
+ schema = get_schema()
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, schema, strict=True)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_disable_ec2_metadata.py b/tests/unittests/config/test_cc_disable_ec2_metadata.py
new file mode 100644
index 00000000..5755e29e
--- /dev/null
+++ b/tests/unittests/config/test_cc_disable_ec2_metadata.py
@@ -0,0 +1,81 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Tests cc_disable_ec2_metadata handler"""
+
+import logging
+
+import pytest
+
+import cloudinit.config.cc_disable_ec2_metadata as ec2_meta
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import CiTestCase, mock, skipUnlessJsonSchema
+
+LOG = logging.getLogger(__name__)
+
+DISABLE_CFG = {"disable_ec2_metadata": "true"}
+
+
+class TestEC2MetadataRoute(CiTestCase):
+ @mock.patch("cloudinit.config.cc_disable_ec2_metadata.subp.which")
+ @mock.patch("cloudinit.config.cc_disable_ec2_metadata.subp.subp")
+ def test_disable_ifconfig(self, m_subp, m_which):
+ """Set the route if ifconfig command is available"""
+ m_which.side_effect = lambda x: x if x == "ifconfig" else None
+ ec2_meta.handle("foo", DISABLE_CFG, None, LOG, None)
+ m_subp.assert_called_with(
+ ["route", "add", "-host", "169.254.169.254", "reject"],
+ capture=False,
+ )
+
+ @mock.patch("cloudinit.config.cc_disable_ec2_metadata.subp.which")
+ @mock.patch("cloudinit.config.cc_disable_ec2_metadata.subp.subp")
+ def test_disable_ip(self, m_subp, m_which):
+ """Set the route if ip command is available"""
+ m_which.side_effect = lambda x: x if x == "ip" else None
+ ec2_meta.handle("foo", DISABLE_CFG, None, LOG, None)
+ m_subp.assert_called_with(
+ ["ip", "route", "add", "prohibit", "169.254.169.254"],
+ capture=False,
+ )
+
+ @mock.patch("cloudinit.config.cc_disable_ec2_metadata.subp.which")
+ @mock.patch("cloudinit.config.cc_disable_ec2_metadata.subp.subp")
+ def test_disable_no_tool(self, m_subp, m_which):
+ """Log error when neither route nor ip commands are available"""
+ m_which.return_value = None # Find neither ifconfig nor ip
+ ec2_meta.handle("foo", DISABLE_CFG, None, LOG, None)
+ self.assertEqual(
+ [mock.call("ip"), mock.call("ifconfig")], m_which.call_args_list
+ )
+ m_subp.assert_not_called()
+
+
+@skipUnlessJsonSchema()
+class TestDisableEc2MetadataSchema:
+ """Directly test schema rather than through handle."""
+
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ (
+ # Valid schemas tested by meta.examples in test_schema
+ # Invalid schemas
+ (
+ {"disable_ec2_metadata": 1},
+ "disable_ec2_metadata: 1 is not of type 'boolean'",
+ ),
+ ),
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ """Assert expected schema validation and error messages."""
+ # New-style schema $defs exist in config/cloud-init-schema*.json
+ schema = get_schema()
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, schema, strict=True)
+
+
+# vi: ts=4 expandtab
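Note that DISABLE_CFG above passes the string "true" to the handler, while the
schema test expects a boolean, and JSON Schema type checks do not coerce
strings. A config that passes strict validation therefore uses the boolean
form; a short sketch with the same helpers:

    from cloudinit.config.schema import get_schema, validate_cloudconfig_schema

    # Boolean form, per the "is not of type 'boolean'" case above.
    validate_cloudconfig_schema(
        {"disable_ec2_metadata": True}, get_schema(), strict=True
    )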
diff --git a/tests/unittests/config/test_cc_disk_setup.py b/tests/unittests/config/test_cc_disk_setup.py
new file mode 100644
index 00000000..f2796e83
--- /dev/null
+++ b/tests/unittests/config/test_cc_disk_setup.py
@@ -0,0 +1,333 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import random
+import re
+
+import pytest
+
+from cloudinit.config import cc_disk_setup
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import (
+ CiTestCase,
+ ExitStack,
+ TestCase,
+ mock,
+ skipUnlessJsonSchema,
+)
+
+
+class TestIsDiskUsed(TestCase):
+ def setUp(self):
+ super(TestIsDiskUsed, self).setUp()
+ self.patches = ExitStack()
+ mod_name = "cloudinit.config.cc_disk_setup"
+ self.enumerate_disk = self.patches.enter_context(
+ mock.patch("{0}.enumerate_disk".format(mod_name))
+ )
+ self.check_fs = self.patches.enter_context(
+ mock.patch("{0}.check_fs".format(mod_name))
+ )
+
+ def tearDown(self):
+ super(TestIsDiskUsed, self).tearDown()
+ self.patches.close()
+
+ def test_multiple_child_nodes_returns_true(self):
+ self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(2))
+ self.check_fs.return_value = (mock.MagicMock(), None, mock.MagicMock())
+ self.assertTrue(cc_disk_setup.is_disk_used(mock.MagicMock()))
+
+ def test_valid_filesystem_returns_true(self):
+ self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(1))
+ self.check_fs.return_value = (
+ mock.MagicMock(),
+ "ext4",
+ mock.MagicMock(),
+ )
+ self.assertTrue(cc_disk_setup.is_disk_used(mock.MagicMock()))
+
+ def test_one_child_nodes_and_no_fs_returns_false(self):
+ self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(1))
+ self.check_fs.return_value = (mock.MagicMock(), None, mock.MagicMock())
+ self.assertFalse(cc_disk_setup.is_disk_used(mock.MagicMock()))
+
+
+class TestGetMbrHddSize(TestCase):
+ def setUp(self):
+ super(TestGetMbrHddSize, self).setUp()
+ self.patches = ExitStack()
+ self.subp = self.patches.enter_context(
+ mock.patch.object(cc_disk_setup.subp, "subp")
+ )
+
+ def tearDown(self):
+ super(TestGetMbrHddSize, self).tearDown()
+ self.patches.close()
+
+ def _configure_subp_mock(self, hdd_size_in_bytes, sector_size_in_bytes):
+ def _subp(cmd, *args, **kwargs):
+ self.assertEqual(3, len(cmd))
+ if "--getsize64" in cmd:
+ return hdd_size_in_bytes, None
+ elif "--getss" in cmd:
+ return sector_size_in_bytes, None
+ raise Exception("Unexpected blockdev command called")
+
+ self.subp.side_effect = _subp
+
+ def _test_for_sector_size(self, sector_size):
+ size_in_bytes = random.randint(10000, 10000000) * 512
+ size_in_sectors = size_in_bytes / sector_size
+ self._configure_subp_mock(size_in_bytes, sector_size)
+ self.assertEqual(
+ size_in_sectors, cc_disk_setup.get_hdd_size("/dev/sda1")
+ )
+
+ def test_size_for_512_byte_sectors(self):
+ self._test_for_sector_size(512)
+
+ def test_size_for_1024_byte_sectors(self):
+ self._test_for_sector_size(1024)
+
+ def test_size_for_2048_byte_sectors(self):
+ self._test_for_sector_size(2048)
+
+ def test_size_for_4096_byte_sectors(self):
+ self._test_for_sector_size(4096)
+
+
+class TestGetPartitionMbrLayout(TestCase):
+ def test_single_partition_using_boolean(self):
+ self.assertEqual(
+ "0,", cc_disk_setup.get_partition_mbr_layout(1000, True)
+ )
+
+ def test_single_partition_using_list(self):
+ disk_size = random.randint(1000000, 1000000000000)
+ self.assertEqual(
+ ",,83", cc_disk_setup.get_partition_mbr_layout(disk_size, [100])
+ )
+
+ def test_half_and_half(self):
+ disk_size = random.randint(1000000, 1000000000000)
+ expected_partition_size = int(float(disk_size) / 2)
+ self.assertEqual(
+ ",{0},83\n,,83".format(expected_partition_size),
+ cc_disk_setup.get_partition_mbr_layout(disk_size, [50, 50]),
+ )
+
+ def test_thirds_with_different_partition_type(self):
+ disk_size = random.randint(1000000, 1000000000000)
+ expected_partition_size = int(float(disk_size) * 0.33)
+ self.assertEqual(
+ ",{0},83\n,,82".format(expected_partition_size),
+ cc_disk_setup.get_partition_mbr_layout(disk_size, [33, [66, 82]]),
+ )
+
+
+class TestUpdateFsSetupDevices(TestCase):
+ def test_regression_1634678(self):
+ # Cf. https://bugs.launchpad.net/cloud-init/+bug/1634678
+ fs_setup = {
+ "partition": "auto",
+ "device": "/dev/xvdb1",
+ "overwrite": False,
+ "label": "test",
+ "filesystem": "ext4",
+ }
+
+ cc_disk_setup.update_fs_setup_devices(
+ [fs_setup], lambda device: device
+ )
+
+ self.assertEqual(
+ {
+ "_origname": "/dev/xvdb1",
+ "partition": "auto",
+ "device": "/dev/xvdb1",
+ "overwrite": False,
+ "label": "test",
+ "filesystem": "ext4",
+ },
+ fs_setup,
+ )
+
+ def test_dotted_devname(self):
+ fs_setup = {
+ "partition": "auto",
+ "device": "ephemeral0.0",
+ "label": "test2",
+ "filesystem": "xfs",
+ }
+
+ cc_disk_setup.update_fs_setup_devices(
+ [fs_setup], lambda device: device
+ )
+
+ self.assertEqual(
+ {
+ "_origname": "ephemeral0.0",
+ "_partition": "auto",
+ "partition": "0",
+ "device": "ephemeral0",
+ "label": "test2",
+ "filesystem": "xfs",
+ },
+ fs_setup,
+ )
+
+ def test_dotted_devname_populates_partition(self):
+ fs_setup = {
+ "device": "ephemeral0.1",
+ "label": "test2",
+ "filesystem": "xfs",
+ }
+ cc_disk_setup.update_fs_setup_devices(
+ [fs_setup], lambda device: device
+ )
+ self.assertEqual(
+ {
+ "_origname": "ephemeral0.1",
+ "device": "ephemeral0",
+ "partition": "1",
+ "label": "test2",
+ "filesystem": "xfs",
+ },
+ fs_setup,
+ )
+
+
+@mock.patch(
+ "cloudinit.config.cc_disk_setup.assert_and_settle_device",
+ return_value=None,
+)
+@mock.patch(
+ "cloudinit.config.cc_disk_setup.find_device_node",
+ return_value=("/dev/xdb1", False),
+)
+@mock.patch("cloudinit.config.cc_disk_setup.device_type", return_value=None)
+@mock.patch("cloudinit.config.cc_disk_setup.subp.subp", return_value=("", ""))
+class TestMkfsCommandHandling(CiTestCase):
+
+ with_logs = True
+
+ def test_with_cmd(self, subp, *args):
+ """mkfs honors cmd and logs warnings when extra_opts or overwrite are
+ provided."""
+ cc_disk_setup.mkfs(
+ {
+ "cmd": "mkfs -t %(filesystem)s -L %(label)s %(device)s",
+ "filesystem": "ext4",
+ "device": "/dev/xdb1",
+ "label": "with_cmd",
+ "extra_opts": ["should", "generate", "warning"],
+ "overwrite": "should generate warning too",
+ }
+ )
+
+ self.assertIn(
+ "extra_opts "
+ + "ignored because cmd was specified: mkfs -t ext4 -L with_cmd "
+ + "/dev/xdb1",
+ self.logs.getvalue(),
+ )
+ self.assertIn(
+ "overwrite "
+ + "ignored because cmd was specified: mkfs -t ext4 -L with_cmd "
+ + "/dev/xdb1",
+ self.logs.getvalue(),
+ )
+
+ subp.assert_called_once_with(
+ "mkfs -t ext4 -L with_cmd /dev/xdb1", shell=True
+ )
+
+ @mock.patch("cloudinit.config.cc_disk_setup.subp.which")
+ def test_overwrite_and_extra_opts_without_cmd(self, m_which, subp, *args):
+ """mkfs observes extra_opts and overwrite settings when cmd is not
+ present."""
+ m_which.side_effect = lambda p: {"mkfs.ext4": "/sbin/mkfs.ext4"}[p]
+ cc_disk_setup.mkfs(
+ {
+ "filesystem": "ext4",
+ "device": "/dev/xdb1",
+ "label": "without_cmd",
+ "extra_opts": ["are", "added"],
+ "overwrite": True,
+ }
+ )
+
+ subp.assert_called_once_with(
+ [
+ "/sbin/mkfs.ext4",
+ "/dev/xdb1",
+ "-L",
+ "without_cmd",
+ "-F",
+ "are",
+ "added",
+ ],
+ shell=False,
+ )
+
+ @mock.patch("cloudinit.config.cc_disk_setup.subp.which")
+ def test_mkswap(self, m_which, subp, *args):
+ """mkfs observes extra_opts and overwrite settings when cmd is not
+ present."""
+ m_which.side_effect = iter([None, "/sbin/mkswap"])
+ cc_disk_setup.mkfs(
+ {
+ "filesystem": "swap",
+ "device": "/dev/xdb1",
+ "label": "swap",
+ "overwrite": True,
+ }
+ )
+
+ self.assertEqual(
+ [mock.call("mkfs.swap"), mock.call("mkswap")],
+ m_which.call_args_list,
+ )
+ subp.assert_called_once_with(
+ ["/sbin/mkswap", "/dev/xdb1", "-L", "swap", "-f"], shell=False
+ )
+
+
+@skipUnlessJsonSchema()
+class TestDiskSetupSchema:
+ """Directly test schema rather than through handle."""
+
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ (
+ # Valid schemas tested by meta.examples in test_schema
+ # Invalid schemas
+ ({"disk_setup": 1}, "disk_setup: 1 is not of type 'object'"),
+ ({"fs_setup": 1}, "fs_setup: 1 is not of type 'array'"),
+ (
+ {"device_aliases": 1},
+ "device_aliases: 1 is not of type 'object'",
+ ),
+ (
+ {"debug": {"boguskey": True}},
+ re.escape(
+ "Additional properties are not allowed ('boguskey' was"
+ " unexpected)"
+ ),
+ ),
+ ),
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ """Assert expected schema validation and error messages."""
+ # New-style schema $defs exist in config/cloud-init-schema*.json
+ schema = get_schema()
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, schema, strict=True)
+
+
+# vi: ts=4 expandtab
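The layout strings asserted in TestGetPartitionMbrLayout are comma-separated
"start,size,type" lines (83 is the Linux partition type, 82 is swap). A small
sketch showing the same call outside the test harness, derived only from the
assertions above:

    from cloudinit.config import cc_disk_setup

    # 33%/66% split with the second partition typed 82 (swap).
    layout = cc_disk_setup.get_partition_mbr_layout(1000000, [33, [66, 82]])
    assert layout == ",330000,83\n,,82"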
diff --git a/tests/unittests/config/test_cc_final_message.py b/tests/unittests/config/test_cc_final_message.py
new file mode 100644
index 00000000..46ba99b2
--- /dev/null
+++ b/tests/unittests/config/test_cc_final_message.py
@@ -0,0 +1,46 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+import logging
+from unittest import mock
+
+import pytest
+
+from cloudinit.config.cc_final_message import handle
+
+
+class TestHandle:
+ # TODO: Expand these tests to cover full functionality; currently they only
+ # cover the logic around how the boot-finished file is written (and not its
+ # contents).
+
+ @pytest.mark.parametrize(
+ "instance_dir_exists,file_is_written,expected_log_substring",
+ [
+ (True, True, None),
+ (False, False, "Failed to write boot finished file "),
+ ],
+ )
+ def test_boot_finished_written(
+ self,
+ instance_dir_exists,
+ file_is_written,
+ expected_log_substring,
+ caplog,
+ tmpdir,
+ ):
+ instance_dir = tmpdir.join("var/lib/cloud/instance")
+ if instance_dir_exists:
+ instance_dir.ensure_dir()
+ boot_finished = instance_dir.join("boot-finished")
+
+ m_cloud = mock.Mock(
+ paths=mock.Mock(boot_finished=boot_finished.strpath)
+ )
+
+ handle(None, {}, m_cloud, logging.getLogger(), [])
+
+ # We should not change the status of the instance directory
+ assert instance_dir_exists == instance_dir.exists()
+ assert file_is_written == boot_finished.exists()
+
+ if expected_log_substring:
+ assert expected_log_substring in caplog.text
diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/config/test_cc_growpart.py
index 7f039b79..ba66f136 100644
--- a/tests/unittests/test_handler/test_handler_growpart.py
+++ b/tests/unittests/config/test_cc_growpart.py
@@ -1,19 +1,19 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import cloud
-from cloudinit.config import cc_growpart
-from cloudinit import subp
-
-from cloudinit.tests.helpers import TestCase
-
import errno
import logging
import os
import re
+import shutil
+import stat
import unittest
from contextlib import ExitStack
from unittest import mock
+from cloudinit import cloud, subp, temp_utils
+from cloudinit.config import cc_growpart
+from tests.unittests.helpers import TestCase
+
# growpart:
# mode: auto # off, on, auto, 'growpart'
# devices: ['root']
@@ -58,6 +58,33 @@ usage: gpart add -t type [-a alignment] [-b start] <SNIP> geom
"""
+class Dir:
+ """Stub object"""
+
+ def __init__(self, name):
+ self.name = name
+ self.st_mode = name
+
+ def is_dir(self, *args, **kwargs):
+ return True
+
+ def stat(self, *args, **kwargs):
+ return self
+
+
+class Scanner:
+ """Stub object"""
+
+ def __enter__(self):
+ return (
+ Dir(""),
+ Dir(""),
+ )
+
+ def __exit__(self, *args):
+ pass
+
+
class TestDisabled(unittest.TestCase):
def setUp(self):
super(TestDisabled, self).setUp()
@@ -72,11 +99,12 @@ class TestDisabled(unittest.TestCase):
# Test that nothing is done if mode is off.
# this really only verifies that resizer_factory isn't called
- config = {'growpart': {'mode': 'off'}}
+ config = {"growpart": {"mode": "off"}}
- with mock.patch.object(cc_growpart, 'resizer_factory') as mockobj:
- self.handle(self.name, config, self.cloud_init, self.log,
- self.args)
+ with mock.patch.object(cc_growpart, "resizer_factory") as mockobj:
+ self.handle(
+ self.name, config, self.cloud_init, self.log, self.args
+ )
self.assertEqual(mockobj.call_count, 0)
@@ -91,79 +119,154 @@ class TestConfig(TestCase):
self.cloud_init = None
self.handle = cc_growpart.handle
+ self.tmppath = "/tmp/cloudinit-test-file"
+ self.tmpdir = os.scandir("/tmp")
+ self.tmpfile = open(self.tmppath, "w")
+
+ def tearDown(self):
+ self.tmpfile.close()
+ os.remove(self.tmppath)
@mock.patch.dict("os.environ", clear=True)
def test_no_resizers_auto_is_fine(self):
with mock.patch.object(
- subp, 'subp',
- return_value=(HELP_GROWPART_NO_RESIZE, "")) as mockobj:
-
- config = {'growpart': {'mode': 'auto'}}
- self.handle(self.name, config, self.cloud_init, self.log,
- self.args)
-
- mockobj.assert_has_calls([
- mock.call(['growpart', '--help'], env={'LANG': 'C'}),
- mock.call(['gpart', 'help'], env={'LANG': 'C'}, rcs=[0, 1])])
+ subp, "subp", return_value=(HELP_GROWPART_NO_RESIZE, "")
+ ) as mockobj:
+
+ config = {"growpart": {"mode": "auto"}}
+ self.handle(
+ self.name, config, self.cloud_init, self.log, self.args
+ )
+
+ mockobj.assert_has_calls(
+ [
+ mock.call(["growpart", "--help"], env={"LANG": "C"}),
+ mock.call(
+ ["gpart", "help"], env={"LANG": "C"}, rcs=[0, 1]
+ ),
+ ]
+ )
@mock.patch.dict("os.environ", clear=True)
def test_no_resizers_mode_growpart_is_exception(self):
with mock.patch.object(
- subp, 'subp',
- return_value=(HELP_GROWPART_NO_RESIZE, "")) as mockobj:
- config = {'growpart': {'mode': "growpart"}}
+ subp, "subp", return_value=(HELP_GROWPART_NO_RESIZE, "")
+ ) as mockobj:
+ config = {"growpart": {"mode": "growpart"}}
self.assertRaises(
- ValueError, self.handle, self.name, config,
- self.cloud_init, self.log, self.args)
+ ValueError,
+ self.handle,
+ self.name,
+ config,
+ self.cloud_init,
+ self.log,
+ self.args,
+ )
mockobj.assert_called_once_with(
- ['growpart', '--help'], env={'LANG': 'C'})
+ ["growpart", "--help"], env={"LANG": "C"}
+ )
@mock.patch.dict("os.environ", clear=True)
def test_mode_auto_prefers_growpart(self):
with mock.patch.object(
- subp, 'subp',
- return_value=(HELP_GROWPART_RESIZE, "")) as mockobj:
+ subp, "subp", return_value=(HELP_GROWPART_RESIZE, "")
+ ) as mockobj:
ret = cc_growpart.resizer_factory(mode="auto")
self.assertIsInstance(ret, cc_growpart.ResizeGrowPart)
mockobj.assert_called_once_with(
- ['growpart', '--help'], env={'LANG': 'C'})
+ ["growpart", "--help"], env={"LANG": "C"}
+ )
+
+ @mock.patch.dict("os.environ", {"LANG": "cs_CZ.UTF-8"}, clear=True)
+ @mock.patch.object(temp_utils, "mkdtemp", return_value="/tmp/much-random")
+ @mock.patch.object(stat, "S_ISDIR", return_value=False)
+ @mock.patch.object(os.path, "samestat", return_value=True)
+ @mock.patch.object(os.path, "join", return_value="/tmp")
+ @mock.patch.object(os, "scandir", return_value=Scanner())
+ @mock.patch.object(os, "mkdir")
+ @mock.patch.object(os, "unlink")
+ @mock.patch.object(os, "rmdir")
+ @mock.patch.object(os, "open", return_value=1)
+ @mock.patch.object(os, "close")
+ @mock.patch.object(shutil, "rmtree")
+ @mock.patch.object(os, "lseek", return_value=1024)
+ @mock.patch.object(os, "lstat", return_value="interesting metadata")
+ def test_force_lang_check_tempfile(self, *args, **kwargs):
+ with mock.patch.object(
+ subp, "subp", return_value=(HELP_GROWPART_RESIZE, "")
+ ) as mockobj:
- @mock.patch.dict("os.environ", clear=True)
+ ret = cc_growpart.resizer_factory(mode="auto")
+ self.assertIsInstance(ret, cc_growpart.ResizeGrowPart)
+ diskdev = "/dev/sdb"
+ partnum = 1
+ partdev = "/dev/sdb"
+ ret.resize(diskdev, partnum, partdev)
+ mockobj.assert_has_calls(
+ [
+ mock.call(
+ ["growpart", "--dry-run", diskdev, partnum],
+ env={"LANG": "C", "TMPDIR": "/tmp"},
+ ),
+ mock.call(
+ ["growpart", diskdev, partnum],
+ env={"LANG": "C", "TMPDIR": "/tmp"},
+ ),
+ ]
+ )
+
+ @mock.patch.dict("os.environ", {"LANG": "cs_CZ.UTF-8"}, clear=True)
def test_mode_auto_falls_back_to_gpart(self):
with mock.patch.object(
- subp, 'subp',
- return_value=("", HELP_GPART)) as mockobj:
+ subp, "subp", return_value=("", HELP_GPART)
+ ) as mockobj:
ret = cc_growpart.resizer_factory(mode="auto")
self.assertIsInstance(ret, cc_growpart.ResizeGpart)
- mockobj.assert_has_calls([
- mock.call(['growpart', '--help'], env={'LANG': 'C'}),
- mock.call(['gpart', 'help'], env={'LANG': 'C'}, rcs=[0, 1])])
+ mockobj.assert_has_calls(
+ [
+ mock.call(["growpart", "--help"], env={"LANG": "C"}),
+ mock.call(
+ ["gpart", "help"], env={"LANG": "C"}, rcs=[0, 1]
+ ),
+ ]
+ )
def test_handle_with_no_growpart_entry(self):
# if no 'growpart' entry in config, then mode=auto should be used
myresizer = object()
- retval = (("/", cc_growpart.RESIZE.CHANGED, "my-message",),)
+ retval = (
+ (
+ "/",
+ cc_growpart.RESIZE.CHANGED,
+ "my-message",
+ ),
+ )
with ExitStack() as mocks:
factory = mocks.enter_context(
- mock.patch.object(cc_growpart, 'resizer_factory',
- return_value=myresizer))
+ mock.patch.object(
+ cc_growpart, "resizer_factory", return_value=myresizer
+ )
+ )
rsdevs = mocks.enter_context(
- mock.patch.object(cc_growpart, 'resize_devices',
- return_value=retval))
+ mock.patch.object(
+ cc_growpart, "resize_devices", return_value=retval
+ )
+ )
mocks.enter_context(
- mock.patch.object(cc_growpart, 'RESIZERS',
- (('mysizer', object),)
- ))
+ mock.patch.object(
+ cc_growpart, "RESIZERS", (("mysizer", object),)
+ )
+ )
self.handle(self.name, {}, self.cloud_init, self.log, self.args)
- factory.assert_called_once_with('auto')
- rsdevs.assert_called_once_with(myresizer, ['/'])
+ factory.assert_called_once_with("auto")
+ rsdevs.assert_called_once_with(myresizer, ["/"])
class TestResize(unittest.TestCase):
@@ -177,9 +280,18 @@ class TestResize(unittest.TestCase):
# this patches out devent2dev, os.stat, and device_part_info
# so in the end, doesn't test a lot
devs = ["/dev/XXda1", "/dev/YYda2"]
- devstat_ret = Bunch(st_mode=25008, st_ino=6078, st_dev=5,
- st_nlink=1, st_uid=0, st_gid=6, st_size=0,
- st_atime=0, st_mtime=0, st_ctime=0)
+ devstat_ret = Bunch(
+ st_mode=25008,
+ st_ino=6078,
+ st_dev=5,
+ st_nlink=1,
+ st_uid=0,
+ st_gid=6,
+ st_size=0,
+ st_atime=0,
+ st_mtime=0,
+ st_ctime=0,
+ )
enoent = ["/dev/NOENT"]
real_stat = os.stat
resize_calls = []
@@ -213,12 +325,15 @@ class TestResize(unittest.TestCase):
return f
return None
- self.assertEqual(cc_growpart.RESIZE.NOCHANGE,
- find("/dev/XXda1", resized)[1])
- self.assertEqual(cc_growpart.RESIZE.CHANGED,
- find("/dev/YYda2", resized)[1])
- self.assertEqual(cc_growpart.RESIZE.SKIPPED,
- find(enoent[0], resized)[1])
+ self.assertEqual(
+ cc_growpart.RESIZE.NOCHANGE, find("/dev/XXda1", resized)[1]
+ )
+ self.assertEqual(
+ cc_growpart.RESIZE.CHANGED, find("/dev/YYda2", resized)[1]
+ )
+ self.assertEqual(
+ cc_growpart.RESIZE.SKIPPED, find(enoent[0], resized)[1]
+ )
# self.assertEqual(resize_calls,
# [("/dev/XXda", "1", "/dev/XXda1"),
# ("/dev/YYda", "2", "/dev/YYda2")])
diff --git a/tests/unittests/config/test_cc_grub_dpkg.py b/tests/unittests/config/test_cc_grub_dpkg.py
new file mode 100644
index 00000000..5151a7b5
--- /dev/null
+++ b/tests/unittests/config/test_cc_grub_dpkg.py
@@ -0,0 +1,187 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from logging import Logger
+from unittest import mock
+
+import pytest
+
+from cloudinit.config.cc_grub_dpkg import fetch_idevs, handle
+from cloudinit.subp import ProcessExecutionError
+
+
+class TestFetchIdevs:
+ """Tests cc_grub_dpkg.fetch_idevs()"""
+
+ # Note: udevadm info returns devices in a large single line string
+ @pytest.mark.parametrize(
+ "grub_output,path_exists,expected_log_call,udevadm_output"
+ ",expected_idevs",
+ [
+ # Inside a container, grub not installed
+ (
+ ProcessExecutionError(reason=FileNotFoundError()),
+ False,
+ mock.call("'grub-probe' not found in $PATH"),
+ "",
+ "",
+ ),
+ # Inside a container, grub installed
+ (
+ ProcessExecutionError(stderr="failed to get canonical path"),
+ False,
+ mock.call("grub-probe 'failed to get canonical path'"),
+ "",
+ "",
+ ),
+ # KVM Instance
+ (
+ ["/dev/vda"],
+ True,
+ None,
+ (
+ "/dev/disk/by-path/pci-0000:00:00.0 ",
+ "/dev/disk/by-path/virtio-pci-0000:00:00.0 ",
+ ),
+ "/dev/vda",
+ ),
+ # Xen Instance
+ (
+ ["/dev/xvda"],
+ True,
+ None,
+ "",
+ "/dev/xvda",
+ ),
+ # NVMe Hardware Instance
+ (
+ ["/dev/nvme1n1"],
+ True,
+ None,
+ (
+ "/dev/disk/by-id/nvme-Company_hash000 ",
+ "/dev/disk/by-id/nvme-nvme.000-000-000-000-000 ",
+ "/dev/disk/by-path/pci-0000:00:00.0-nvme-0 ",
+ ),
+ "/dev/disk/by-id/nvme-Company_hash000",
+ ),
+ # SCSI Hardware Instance
+ (
+ ["/dev/sda"],
+ True,
+ None,
+ (
+ "/dev/disk/by-id/company-user-1 ",
+ "/dev/disk/by-id/scsi-0Company_user-1 ",
+ "/dev/disk/by-path/pci-0000:00:00.0-scsi-0:0:0:0 ",
+ ),
+ "/dev/disk/by-id/company-user-1",
+ ),
+ ],
+ )
+ @mock.patch("cloudinit.config.cc_grub_dpkg.util.logexc")
+ @mock.patch("cloudinit.config.cc_grub_dpkg.os.path.exists")
+ @mock.patch("cloudinit.config.cc_grub_dpkg.subp.subp")
+ def test_fetch_idevs(
+ self,
+ m_subp,
+ m_exists,
+ m_logexc,
+ grub_output,
+ path_exists,
+ expected_log_call,
+ udevadm_output,
+ expected_idevs,
+ ):
+ """Tests outputs from grub-probe and udevadm info against grub-dpkg"""
+ m_subp.side_effect = [grub_output, ["".join(udevadm_output)]]
+ m_exists.return_value = path_exists
+ log = mock.Mock(spec=Logger)
+ idevs = fetch_idevs(log)
+ assert expected_idevs == idevs
+ if expected_log_call is not None:
+ assert expected_log_call in log.debug.call_args_list
+
+
+class TestHandle:
+ """Tests cc_grub_dpkg.handle()"""
+
+ @pytest.mark.parametrize(
+ "cfg_idevs,cfg_idevs_empty,fetch_idevs_output,expected_log_output",
+ [
+ (
+ # No configuration
+ None,
+ None,
+ "/dev/disk/by-id/nvme-Company_hash000",
+ (
+ "Setting grub debconf-set-selections with ",
+ "'/dev/disk/by-id/nvme-Company_hash000','false'",
+ ),
+ ),
+ (
+ # idevs set, idevs_empty unset
+ "/dev/sda",
+ None,
+ "/dev/sda",
+ (
+ "Setting grub debconf-set-selections with ",
+ "'/dev/sda','false'",
+ ),
+ ),
+ (
+ # idevs unset, idevs_empty set
+ None,
+ "true",
+ "/dev/xvda",
+ (
+ "Setting grub debconf-set-selections with ",
+ "'/dev/xvda','true'",
+ ),
+ ),
+ (
+ # idevs set, idevs_empty set
+ "/dev/vda",
+ "false",
+ "/dev/disk/by-id/company-user-1",
+ (
+ "Setting grub debconf-set-selections with ",
+ "'/dev/vda','false'",
+ ),
+ ),
+ (
+ # idevs set, idevs_empty set
+                # Respect what the user defines, even if it's logically wrong
+ "/dev/nvme0n1",
+ "true",
+ "",
+ (
+ "Setting grub debconf-set-selections with ",
+ "'/dev/nvme0n1','true'",
+ ),
+ ),
+ ],
+ )
+ @mock.patch("cloudinit.config.cc_grub_dpkg.fetch_idevs")
+ @mock.patch("cloudinit.config.cc_grub_dpkg.util.get_cfg_option_str")
+ @mock.patch("cloudinit.config.cc_grub_dpkg.util.logexc")
+ @mock.patch("cloudinit.config.cc_grub_dpkg.subp.subp")
+ def test_handle(
+ self,
+ m_subp,
+ m_logexc,
+ m_get_cfg_str,
+ m_fetch_idevs,
+ cfg_idevs,
+ cfg_idevs_empty,
+ fetch_idevs_output,
+ expected_log_output,
+ ):
+ """Test setting of correct debconf database entries"""
+ m_get_cfg_str.side_effect = [cfg_idevs, cfg_idevs_empty]
+ m_fetch_idevs.return_value = fetch_idevs_output
+ log = mock.Mock(spec=Logger)
+ handle(mock.Mock(), mock.Mock(), mock.Mock(), log, mock.Mock())
+ log.debug.assert_called_with("".join(expected_log_output))
+
+
+# vi: ts=4 expandtab
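The first two parametrized cases above hand a ProcessExecutionError to
m_subp.side_effect; because unittest.mock raises any exception instance it
finds in a side_effect iterable, those cases simulate grub-probe failing while
the following udevadm call still returns output. A minimal illustration of
that mock behaviour (generic example, unrelated to grub-dpkg itself):

    from unittest import mock

    m = mock.Mock(side_effect=[ValueError("boom"), "second result"])
    try:
        m()  # the exception instance in the iterable is raised
    except ValueError:
        pass
    assert m() == "second result"  # later items are returned normally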
diff --git a/tests/unittests/config/test_cc_install_hotplug.py b/tests/unittests/config/test_cc_install_hotplug.py
new file mode 100644
index 00000000..e67fce60
--- /dev/null
+++ b/tests/unittests/config/test_cc_install_hotplug.py
@@ -0,0 +1,129 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+from collections import namedtuple
+from unittest import mock
+
+import pytest
+
+from cloudinit.config.cc_install_hotplug import (
+ HOTPLUG_UDEV_PATH,
+ HOTPLUG_UDEV_RULES_TEMPLATE,
+ handle,
+)
+from cloudinit.event import EventScope, EventType
+
+
+@pytest.fixture()
+def mocks():
+ m_update_enabled = mock.patch("cloudinit.stages.update_event_enabled")
+ m_write = mock.patch("cloudinit.util.write_file", autospec=True)
+ m_del = mock.patch("cloudinit.util.del_file", autospec=True)
+ m_subp = mock.patch("cloudinit.subp.subp")
+ m_which = mock.patch("cloudinit.subp.which", return_value=None)
+ m_path_exists = mock.patch("os.path.exists", return_value=False)
+
+ yield namedtuple(
+ "Mocks", "m_update_enabled m_write m_del m_subp m_which m_path_exists"
+ )(
+ m_update_enabled.start(),
+ m_write.start(),
+ m_del.start(),
+ m_subp.start(),
+ m_which.start(),
+ m_path_exists.start(),
+ )
+
+ m_update_enabled.stop()
+ m_write.stop()
+ m_del.stop()
+ m_subp.stop()
+ m_which.stop()
+ m_path_exists.stop()
+
+
+class TestInstallHotplug:
+ @pytest.mark.parametrize("libexec_exists", [True, False])
+ def test_rules_installed_when_supported_and_enabled(
+ self, mocks, libexec_exists
+ ):
+ mocks.m_which.return_value = "udevadm"
+ mocks.m_update_enabled.return_value = True
+ m_cloud = mock.MagicMock()
+ m_cloud.datasource.get_supported_events.return_value = {
+ EventScope.NETWORK: {EventType.HOTPLUG}
+ }
+
+ if libexec_exists:
+ libexecdir = "/usr/libexec/cloud-init"
+ else:
+ libexecdir = "/usr/lib/cloud-init"
+ with mock.patch("os.path.exists", return_value=libexec_exists):
+ handle(None, {}, m_cloud, mock.Mock(), None)
+ mocks.m_write.assert_called_once_with(
+ filename=HOTPLUG_UDEV_PATH,
+ content=HOTPLUG_UDEV_RULES_TEMPLATE.format(
+ libexecdir=libexecdir
+ ),
+ )
+ assert mocks.m_subp.call_args_list == [
+ mock.call(
+ [
+ "udevadm",
+ "control",
+ "--reload-rules",
+ ]
+ )
+ ]
+ assert mocks.m_del.call_args_list == []
+
+ def test_rules_not_installed_when_unsupported(self, mocks):
+ mocks.m_update_enabled.return_value = True
+ m_cloud = mock.MagicMock()
+ m_cloud.datasource.get_supported_events.return_value = {}
+
+ handle(None, {}, m_cloud, mock.Mock(), None)
+ assert mocks.m_write.call_args_list == []
+ assert mocks.m_del.call_args_list == []
+ assert mocks.m_subp.call_args_list == []
+
+ def test_rules_not_installed_when_disabled(self, mocks):
+ mocks.m_update_enabled.return_value = False
+ m_cloud = mock.MagicMock()
+ m_cloud.datasource.get_supported_events.return_value = {
+ EventScope.NETWORK: {EventType.HOTPLUG}
+ }
+
+ handle(None, {}, m_cloud, mock.Mock(), None)
+ assert mocks.m_write.call_args_list == []
+ assert mocks.m_del.call_args_list == []
+ assert mocks.m_subp.call_args_list == []
+
+ def test_rules_uninstalled_when_disabled(self, mocks):
+ mocks.m_path_exists.return_value = True
+ mocks.m_update_enabled.return_value = False
+ m_cloud = mock.MagicMock()
+ m_cloud.datasource.get_supported_events.return_value = {}
+
+ handle(None, {}, m_cloud, mock.Mock(), None)
+ mocks.m_del.assert_called_with(HOTPLUG_UDEV_PATH)
+ assert mocks.m_subp.call_args_list == [
+ mock.call(
+ [
+ "udevadm",
+ "control",
+ "--reload-rules",
+ ]
+ )
+ ]
+ assert mocks.m_write.call_args_list == []
+
+ def test_rules_not_installed_when_no_udevadm(self, mocks):
+ mocks.m_update_enabled.return_value = True
+ m_cloud = mock.MagicMock()
+ m_cloud.datasource.get_supported_events.return_value = {
+ EventScope.NETWORK: {EventType.HOTPLUG}
+ }
+
+ handle(None, {}, m_cloud, mock.Mock(), None)
+ assert mocks.m_del.call_args_list == []
+ assert mocks.m_write.call_args_list == []
+ assert mocks.m_subp.call_args_list == []
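The mocks fixture above starts six patchers by hand and stops them after the
yield. A more compact arrangement with identical teardown semantics would use
contextlib.ExitStack; the following is only a sketch of that alternative, not
code from this patch:

    from contextlib import ExitStack
    from unittest import mock

    import pytest

    @pytest.fixture()
    def which_mock():
        # Every patch entered on the stack is undone automatically when the
        # with-block exits after the test finishes.
        with ExitStack() as stack:
            yield stack.enter_context(
                mock.patch("cloudinit.subp.which", return_value=None)
            )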
diff --git a/tests/unittests/config/test_cc_keys_to_console.py b/tests/unittests/config/test_cc_keys_to_console.py
new file mode 100644
index 00000000..9efc2b48
--- /dev/null
+++ b/tests/unittests/config/test_cc_keys_to_console.py
@@ -0,0 +1,40 @@
+"""Tests for cc_keys_to_console."""
+from unittest import mock
+
+import pytest
+
+from cloudinit.config import cc_keys_to_console
+
+
+class TestHandle:
+ """Tests for cloudinit.config.cc_keys_to_console.handle.
+
+    TODO: These tests only cover the emit_keys_to_console config option; they
+    should be expanded to cover the full functionality.
+ """
+
+ @mock.patch("cloudinit.config.cc_keys_to_console.util.multi_log")
+ @mock.patch("cloudinit.config.cc_keys_to_console.os.path.exists")
+ @mock.patch("cloudinit.config.cc_keys_to_console.subp.subp")
+ @pytest.mark.parametrize(
+ "cfg,subp_called",
+ [
+ ({}, True), # Default to emitting keys
+ ({"ssh": {}}, True), # Default even if we have the parent key
+ (
+ {"ssh": {"emit_keys_to_console": True}},
+ True,
+ ), # Explicitly enabled
+ ({"ssh": {"emit_keys_to_console": False}}, False), # Disabled
+ ],
+ )
+ def test_emit_keys_to_console_config(
+ self, m_subp, m_path_exists, _m_multi_log, cfg, subp_called
+ ):
+ # Ensure we always find the helper
+ m_path_exists.return_value = True
+ m_subp.return_value = ("", "")
+
+ cc_keys_to_console.handle("name", cfg, mock.Mock(), mock.Mock(), ())
+
+ assert subp_called == (m_subp.call_count == 1)
diff --git a/tests/unittests/config/test_cc_landscape.py b/tests/unittests/config/test_cc_landscape.py
new file mode 100644
index 00000000..efddc1b6
--- /dev/null
+++ b/tests/unittests/config/test_cc_landscape.py
@@ -0,0 +1,170 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+import logging
+
+from configobj import ConfigObj
+
+from cloudinit import util
+from cloudinit.config import cc_landscape
+from tests.unittests.helpers import (
+ FilesystemMockingTestCase,
+ mock,
+ wrap_and_call,
+)
+from tests.unittests.util import get_cloud
+
+LOG = logging.getLogger(__name__)
+
+
+class TestLandscape(FilesystemMockingTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestLandscape, self).setUp()
+ self.new_root = self.tmp_dir()
+ self.conf = self.tmp_path("client.conf", self.new_root)
+ self.default_file = self.tmp_path("default_landscape", self.new_root)
+ self.patchUtils(self.new_root)
+ self.add_patch(
+ "cloudinit.distros.ubuntu.Distro.install_packages",
+ "m_install_packages",
+ )
+
+ def test_handler_skips_empty_landscape_cloudconfig(self):
+ """Empty landscape cloud-config section does no work."""
+ mycloud = get_cloud("ubuntu")
+ mycloud.distro = mock.MagicMock()
+ cfg = {"landscape": {}}
+ cc_landscape.handle("notimportant", cfg, mycloud, LOG, None)
+ self.assertFalse(mycloud.distro.install_packages.called)
+
+ def test_handler_error_on_invalid_landscape_type(self):
+ """Raise an error when landscape configuraiton option is invalid."""
+ mycloud = get_cloud("ubuntu")
+ cfg = {"landscape": "wrongtype"}
+ with self.assertRaises(RuntimeError) as context_manager:
+ cc_landscape.handle("notimportant", cfg, mycloud, LOG, None)
+ self.assertIn(
+ "'landscape' key existed in config, but not a dict",
+ str(context_manager.exception),
+ )
+
+ @mock.patch("cloudinit.config.cc_landscape.subp")
+ def test_handler_restarts_landscape_client(self, m_subp):
+ """handler restarts lansdscape-client after install."""
+ mycloud = get_cloud("ubuntu")
+ cfg = {"landscape": {"client": {}}}
+ wrap_and_call(
+ "cloudinit.config.cc_landscape",
+ {"LSC_CLIENT_CFG_FILE": {"new": self.conf}},
+ cc_landscape.handle,
+ "notimportant",
+ cfg,
+ mycloud,
+ LOG,
+ None,
+ )
+ self.assertEqual(
+ [mock.call(["service", "landscape-client", "restart"])],
+ m_subp.subp.call_args_list,
+ )
+
+ def test_handler_installs_client_and_creates_config_file(self):
+ """Write landscape client.conf and install landscape-client."""
+ mycloud = get_cloud("ubuntu")
+ cfg = {"landscape": {"client": {}}}
+ expected = {
+ "client": {
+ "log_level": "info",
+ "url": "https://landscape.canonical.com/message-system",
+ "ping_url": "http://landscape.canonical.com/ping",
+ "data_path": "/var/lib/landscape/client",
+ }
+ }
+ mycloud.distro = mock.MagicMock()
+ wrap_and_call(
+ "cloudinit.config.cc_landscape",
+ {
+ "LSC_CLIENT_CFG_FILE": {"new": self.conf},
+ "LS_DEFAULT_FILE": {"new": self.default_file},
+ },
+ cc_landscape.handle,
+ "notimportant",
+ cfg,
+ mycloud,
+ LOG,
+ None,
+ )
+ self.assertEqual(
+ [mock.call("landscape-client")],
+ mycloud.distro.install_packages.call_args,
+ )
+ self.assertEqual(expected, dict(ConfigObj(self.conf)))
+ self.assertIn(
+ "Wrote landscape config file to {0}".format(self.conf),
+ self.logs.getvalue(),
+ )
+ default_content = util.load_file(self.default_file)
+ self.assertEqual("RUN=1\n", default_content)
+
+ def test_handler_writes_merged_client_config_file_with_defaults(self):
+ """Merge and write options from LSC_CLIENT_CFG_FILE with defaults."""
+ # Write existing sparse client.conf file
+ util.write_file(self.conf, "[client]\ncomputer_title = My PC\n")
+ mycloud = get_cloud("ubuntu")
+ cfg = {"landscape": {"client": {}}}
+ expected = {
+ "client": {
+ "log_level": "info",
+ "url": "https://landscape.canonical.com/message-system",
+ "ping_url": "http://landscape.canonical.com/ping",
+ "data_path": "/var/lib/landscape/client",
+ "computer_title": "My PC",
+ }
+ }
+ wrap_and_call(
+ "cloudinit.config.cc_landscape",
+ {"LSC_CLIENT_CFG_FILE": {"new": self.conf}},
+ cc_landscape.handle,
+ "notimportant",
+ cfg,
+ mycloud,
+ LOG,
+ None,
+ )
+ self.assertEqual(expected, dict(ConfigObj(self.conf)))
+ self.assertIn(
+ "Wrote landscape config file to {0}".format(self.conf),
+ self.logs.getvalue(),
+ )
+
+ def test_handler_writes_merged_provided_cloudconfig_with_defaults(self):
+ """Merge and write options from cloud-config options with defaults."""
+ # Write empty sparse client.conf file
+ util.write_file(self.conf, "")
+ mycloud = get_cloud("ubuntu")
+ cfg = {"landscape": {"client": {"computer_title": "My PC"}}}
+ expected = {
+ "client": {
+ "log_level": "info",
+ "url": "https://landscape.canonical.com/message-system",
+ "ping_url": "http://landscape.canonical.com/ping",
+ "data_path": "/var/lib/landscape/client",
+ "computer_title": "My PC",
+ }
+ }
+ wrap_and_call(
+ "cloudinit.config.cc_landscape",
+ {"LSC_CLIENT_CFG_FILE": {"new": self.conf}},
+ cc_landscape.handle,
+ "notimportant",
+ cfg,
+ mycloud,
+ LOG,
+ None,
+ )
+ self.assertEqual(expected, dict(ConfigObj(self.conf)))
+ self.assertIn(
+ "Wrote landscape config file to {0}".format(self.conf),
+ self.logs.getvalue(),
+ )
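The expected dicts in these landscape tests are compared against the file via
ConfigObj, which renders nested dicts as INI sections with one "key = value"
line each. A rough sketch of what the written client.conf therefore contains
(derived from the expected dict, not captured module output):

    from configobj import ConfigObj

    expected = {
        "client": {
            "log_level": "info",
            "url": "https://landscape.canonical.com/message-system",
            "ping_url": "http://landscape.canonical.com/ping",
            "data_path": "/var/lib/landscape/client",
        }
    }
    print("\n".join(ConfigObj(expected).write()))
    # [client]
    # log_level = info
    # url = https://landscape.canonical.com/message-system
    # ...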
diff --git a/tests/unittests/config/test_cc_locale.py b/tests/unittests/config/test_cc_locale.py
new file mode 100644
index 00000000..7190bc68
--- /dev/null
+++ b/tests/unittests/config/test_cc_locale.py
@@ -0,0 +1,123 @@
+# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
+#
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+import logging
+import os
+import shutil
+import tempfile
+from io import BytesIO
+from unittest import mock
+
+from configobj import ConfigObj
+
+from cloudinit import util
+from cloudinit.config import cc_locale
+from tests.unittests import helpers as t_help
+from tests.unittests.util import get_cloud
+
+LOG = logging.getLogger(__name__)
+
+
+class TestLocale(t_help.FilesystemMockingTestCase):
+ def setUp(self):
+ super(TestLocale, self).setUp()
+ self.new_root = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, self.new_root)
+ self.patchUtils(self.new_root)
+
+ def test_set_locale_arch(self):
+ locale = "en_GB.UTF-8"
+ locale_configfile = "/etc/invalid-locale-path"
+ cfg = {
+ "locale": locale,
+ "locale_configfile": locale_configfile,
+ }
+ cc = get_cloud("arch")
+
+ with mock.patch("cloudinit.distros.arch.subp.subp") as m_subp:
+ with mock.patch("cloudinit.distros.arch.LOG.warning") as m_LOG:
+ cc_locale.handle("cc_locale", cfg, cc, LOG, [])
+ m_LOG.assert_called_with(
+ "Invalid locale_configfile %s, "
+ "only supported value is "
+ "/etc/locale.conf",
+ locale_configfile,
+ )
+
+ contents = util.load_file(cc.distro.locale_gen_fn)
+ self.assertIn("%s UTF-8" % locale, contents)
+ m_subp.assert_called_with(
+ ["localectl", "set-locale", locale], capture=False
+ )
+
+ def test_set_locale_sles(self):
+
+ cfg = {
+ "locale": "My.Locale",
+ }
+ cc = get_cloud("sles")
+ cc_locale.handle("cc_locale", cfg, cc, LOG, [])
+ if cc.distro.uses_systemd():
+ locale_conf = cc.distro.systemd_locale_conf_fn
+ else:
+ locale_conf = cc.distro.locale_conf_fn
+ contents = util.load_file(locale_conf, decode=False)
+ n_cfg = ConfigObj(BytesIO(contents))
+ if cc.distro.uses_systemd():
+ self.assertEqual({"LANG": cfg["locale"]}, dict(n_cfg))
+ else:
+ self.assertEqual({"RC_LANG": cfg["locale"]}, dict(n_cfg))
+
+ def test_set_locale_sles_default(self):
+ cfg = {}
+ cc = get_cloud("sles")
+ cc_locale.handle("cc_locale", cfg, cc, LOG, [])
+
+ if cc.distro.uses_systemd():
+ locale_conf = cc.distro.systemd_locale_conf_fn
+ keyname = "LANG"
+ else:
+ locale_conf = cc.distro.locale_conf_fn
+ keyname = "RC_LANG"
+
+ contents = util.load_file(locale_conf, decode=False)
+ n_cfg = ConfigObj(BytesIO(contents))
+ self.assertEqual({keyname: "en_US.UTF-8"}, dict(n_cfg))
+
+ def test_locale_update_config_if_different_than_default(self):
+ """Test cc_locale writes updates conf if different than default"""
+ locale_conf = os.path.join(self.new_root, "etc/default/locale")
+ util.write_file(locale_conf, 'LANG="en_US.UTF-8"\n')
+ cfg = {"locale": "C.UTF-8"}
+ cc = get_cloud("ubuntu")
+ with mock.patch("cloudinit.distros.debian.subp.subp") as m_subp:
+ with mock.patch(
+ "cloudinit.distros.debian.LOCALE_CONF_FN", locale_conf
+ ):
+ cc_locale.handle("cc_locale", cfg, cc, LOG, [])
+ m_subp.assert_called_with(
+ [
+ "update-locale",
+ "--locale-file=%s" % locale_conf,
+ "LANG=C.UTF-8",
+ ],
+ capture=False,
+ )
+
+ def test_locale_rhel_defaults_en_us_utf8(self):
+ """Test cc_locale gets en_US.UTF-8 from distro get_locale fallback"""
+ cfg = {}
+ cc = get_cloud("rhel")
+ update_sysconfig = "cloudinit.distros.rhel_util.update_sysconfig_file"
+ with mock.patch.object(cc.distro, "uses_systemd") as m_use_sd:
+ m_use_sd.return_value = True
+ with mock.patch(update_sysconfig) as m_update_syscfg:
+ cc_locale.handle("cc_locale", cfg, cc, LOG, [])
+ m_update_syscfg.assert_called_with(
+ "/etc/locale.conf", {"LANG": "en_US.UTF-8"}
+ )
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_lxd.py b/tests/unittests/config/test_cc_lxd.py
new file mode 100644
index 00000000..720274d6
--- /dev/null
+++ b/tests/unittests/config/test_cc_lxd.py
@@ -0,0 +1,272 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+from unittest import mock
+
+from cloudinit.config import cc_lxd
+from tests.unittests import helpers as t_help
+from tests.unittests.util import get_cloud
+
+
+class TestLxd(t_help.CiTestCase):
+
+ with_logs = True
+
+ lxd_cfg = {
+ "lxd": {
+ "init": {
+ "network_address": "0.0.0.0",
+ "storage_backend": "zfs",
+ "storage_pool": "poolname",
+ }
+ }
+ }
+
+ @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
+ @mock.patch("cloudinit.config.cc_lxd.subp")
+ def test_lxd_init(self, mock_subp, m_maybe_clean):
+ cc = get_cloud()
+ mock_subp.which.return_value = True
+ m_maybe_clean.return_value = None
+ cc_lxd.handle("cc_lxd", self.lxd_cfg, cc, self.logger, [])
+ self.assertTrue(mock_subp.which.called)
+ # no bridge config, so maybe_cleanup should not be called.
+ self.assertFalse(m_maybe_clean.called)
+ self.assertEqual(
+ [
+ mock.call(["lxd", "waitready", "--timeout=300"]),
+ mock.call(
+ [
+ "lxd",
+ "init",
+ "--auto",
+ "--network-address=0.0.0.0",
+ "--storage-backend=zfs",
+ "--storage-pool=poolname",
+ ]
+ ),
+ ],
+ mock_subp.subp.call_args_list,
+ )
+
+ @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
+ @mock.patch("cloudinit.config.cc_lxd.subp")
+ def test_lxd_install(self, mock_subp, m_maybe_clean):
+ cc = get_cloud()
+ cc.distro = mock.MagicMock()
+ mock_subp.which.return_value = None
+ cc_lxd.handle("cc_lxd", self.lxd_cfg, cc, self.logger, [])
+ self.assertNotIn("WARN", self.logs.getvalue())
+ self.assertTrue(cc.distro.install_packages.called)
+ cc_lxd.handle("cc_lxd", self.lxd_cfg, cc, self.logger, [])
+ self.assertFalse(m_maybe_clean.called)
+ install_pkg = cc.distro.install_packages.call_args_list[0][0][0]
+ self.assertEqual(sorted(install_pkg), ["lxd", "zfsutils-linux"])
+
+ @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
+ @mock.patch("cloudinit.config.cc_lxd.subp")
+ def test_no_init_does_nothing(self, mock_subp, m_maybe_clean):
+ cc = get_cloud()
+ cc.distro = mock.MagicMock()
+ cc_lxd.handle("cc_lxd", {"lxd": {}}, cc, self.logger, [])
+ self.assertFalse(cc.distro.install_packages.called)
+ self.assertFalse(mock_subp.subp.called)
+ self.assertFalse(m_maybe_clean.called)
+
+ @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
+ @mock.patch("cloudinit.config.cc_lxd.subp")
+ def test_no_lxd_does_nothing(self, mock_subp, m_maybe_clean):
+ cc = get_cloud()
+ cc.distro = mock.MagicMock()
+ cc_lxd.handle("cc_lxd", {"package_update": True}, cc, self.logger, [])
+ self.assertFalse(cc.distro.install_packages.called)
+ self.assertFalse(mock_subp.subp.called)
+ self.assertFalse(m_maybe_clean.called)
+
+ def test_lxd_debconf_new_full(self):
+ data = {
+ "mode": "new",
+ "name": "testbr0",
+ "ipv4_address": "10.0.8.1",
+ "ipv4_netmask": "24",
+ "ipv4_dhcp_first": "10.0.8.2",
+ "ipv4_dhcp_last": "10.0.8.254",
+ "ipv4_dhcp_leases": "250",
+ "ipv4_nat": "true",
+ "ipv6_address": "fd98:9e0:3744::1",
+ "ipv6_netmask": "64",
+ "ipv6_nat": "true",
+ "domain": "lxd",
+ }
+ self.assertEqual(
+ cc_lxd.bridge_to_debconf(data),
+ {
+ "lxd/setup-bridge": "true",
+ "lxd/bridge-name": "testbr0",
+ "lxd/bridge-ipv4": "true",
+ "lxd/bridge-ipv4-address": "10.0.8.1",
+ "lxd/bridge-ipv4-netmask": "24",
+ "lxd/bridge-ipv4-dhcp-first": "10.0.8.2",
+ "lxd/bridge-ipv4-dhcp-last": "10.0.8.254",
+ "lxd/bridge-ipv4-dhcp-leases": "250",
+ "lxd/bridge-ipv4-nat": "true",
+ "lxd/bridge-ipv6": "true",
+ "lxd/bridge-ipv6-address": "fd98:9e0:3744::1",
+ "lxd/bridge-ipv6-netmask": "64",
+ "lxd/bridge-ipv6-nat": "true",
+ "lxd/bridge-domain": "lxd",
+ },
+ )
+
+ def test_lxd_debconf_new_partial(self):
+ data = {
+ "mode": "new",
+ "ipv6_address": "fd98:9e0:3744::1",
+ "ipv6_netmask": "64",
+ "ipv6_nat": "true",
+ }
+ self.assertEqual(
+ cc_lxd.bridge_to_debconf(data),
+ {
+ "lxd/setup-bridge": "true",
+ "lxd/bridge-ipv6": "true",
+ "lxd/bridge-ipv6-address": "fd98:9e0:3744::1",
+ "lxd/bridge-ipv6-netmask": "64",
+ "lxd/bridge-ipv6-nat": "true",
+ },
+ )
+
+ def test_lxd_debconf_existing(self):
+ data = {"mode": "existing", "name": "testbr0"}
+ self.assertEqual(
+ cc_lxd.bridge_to_debconf(data),
+ {
+ "lxd/setup-bridge": "false",
+ "lxd/use-existing-bridge": "true",
+ "lxd/bridge-name": "testbr0",
+ },
+ )
+
+ def test_lxd_debconf_none(self):
+ data = {"mode": "none"}
+ self.assertEqual(
+ cc_lxd.bridge_to_debconf(data),
+ {"lxd/setup-bridge": "false", "lxd/bridge-name": ""},
+ )
+
+ def test_lxd_cmd_new_full(self):
+ data = {
+ "mode": "new",
+ "name": "testbr0",
+ "ipv4_address": "10.0.8.1",
+ "ipv4_netmask": "24",
+ "ipv4_dhcp_first": "10.0.8.2",
+ "ipv4_dhcp_last": "10.0.8.254",
+ "ipv4_dhcp_leases": "250",
+ "ipv4_nat": "true",
+ "ipv6_address": "fd98:9e0:3744::1",
+ "ipv6_netmask": "64",
+ "ipv6_nat": "true",
+ "domain": "lxd",
+ }
+ self.assertEqual(
+ cc_lxd.bridge_to_cmd(data),
+ (
+ [
+ "network",
+ "create",
+ "testbr0",
+ "ipv4.address=10.0.8.1/24",
+ "ipv4.nat=true",
+ "ipv4.dhcp.ranges=10.0.8.2-10.0.8.254",
+ "ipv6.address=fd98:9e0:3744::1/64",
+ "ipv6.nat=true",
+ "dns.domain=lxd",
+ ],
+ ["network", "attach-profile", "testbr0", "default", "eth0"],
+ ),
+ )
+
+ def test_lxd_cmd_new_partial(self):
+ data = {
+ "mode": "new",
+ "ipv6_address": "fd98:9e0:3744::1",
+ "ipv6_netmask": "64",
+ "ipv6_nat": "true",
+ }
+ self.assertEqual(
+ cc_lxd.bridge_to_cmd(data),
+ (
+ [
+ "network",
+ "create",
+ "lxdbr0",
+ "ipv4.address=none",
+ "ipv6.address=fd98:9e0:3744::1/64",
+ "ipv6.nat=true",
+ ],
+ ["network", "attach-profile", "lxdbr0", "default", "eth0"],
+ ),
+ )
+
+ def test_lxd_cmd_existing(self):
+ data = {"mode": "existing", "name": "testbr0"}
+ self.assertEqual(
+ cc_lxd.bridge_to_cmd(data),
+ (
+ None,
+ ["network", "attach-profile", "testbr0", "default", "eth0"],
+ ),
+ )
+
+ def test_lxd_cmd_none(self):
+ data = {"mode": "none"}
+ self.assertEqual(cc_lxd.bridge_to_cmd(data), (None, None))
+
+
+class TestLxdMaybeCleanupDefault(t_help.CiTestCase):
+ """Test the implementation of maybe_cleanup_default."""
+
+ defnet = cc_lxd._DEFAULT_NETWORK_NAME
+
+ @mock.patch("cloudinit.config.cc_lxd._lxc")
+ def test_network_other_than_default_not_deleted(self, m_lxc):
+ """deletion or removal should only occur if bridge is default."""
+ cc_lxd.maybe_cleanup_default(
+ net_name="lxdbr1", did_init=True, create=True, attach=True
+ )
+ m_lxc.assert_not_called()
+
+ @mock.patch("cloudinit.config.cc_lxd._lxc")
+ def test_did_init_false_does_not_delete(self, m_lxc):
+ """deletion or removal should only occur if did_init is True."""
+ cc_lxd.maybe_cleanup_default(
+ net_name=self.defnet, did_init=False, create=True, attach=True
+ )
+ m_lxc.assert_not_called()
+
+ @mock.patch("cloudinit.config.cc_lxd._lxc")
+ def test_network_deleted_if_create_true(self, m_lxc):
+ """deletion of network should occur if create is True."""
+ cc_lxd.maybe_cleanup_default(
+ net_name=self.defnet, did_init=True, create=True, attach=False
+ )
+ m_lxc.assert_called_with(["network", "delete", self.defnet])
+
+ @mock.patch("cloudinit.config.cc_lxd._lxc")
+ def test_device_removed_if_attach_true(self, m_lxc):
+ """deletion of network should occur if create is True."""
+ nic_name = "my_nic"
+ profile = "my_profile"
+ cc_lxd.maybe_cleanup_default(
+ net_name=self.defnet,
+ did_init=True,
+ create=False,
+ attach=True,
+ profile=profile,
+ nic_name=nic_name,
+ )
+ m_lxc.assert_called_once_with(
+ ["profile", "device", "remove", profile, nic_name]
+ )
+
+
+# vi: ts=4 expandtab
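The command asserted in test_lxd_init implies a simple mapping from the
lxd.init keys to "lxd init" flags: underscores become dashes and each pair is
rendered as --key=value. The sketch below reproduces only the expectation from
that assertion, not necessarily how cc_lxd builds the command internally:

    init_cfg = {
        "network_address": "0.0.0.0",
        "storage_backend": "zfs",
        "storage_pool": "poolname",
    }
    cmd = ["lxd", "init", "--auto"] + [
        "--%s=%s" % (key.replace("_", "-"), val)
        for key, val in init_cfg.items()
    ]
    assert cmd == [
        "lxd",
        "init",
        "--auto",
        "--network-address=0.0.0.0",
        "--storage-backend=zfs",
        "--storage-pool=poolname",
    ]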
diff --git a/tests/unittests/test_handler/test_handler_mcollective.py b/tests/unittests/config/test_cc_mcollective.py
index 6891e15f..5cbdeb76 100644
--- a/tests/unittests/test_handler/test_handler_mcollective.py
+++ b/tests/unittests/config/test_cc_mcollective.py
@@ -1,18 +1,17 @@
# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit import (cloud, distros, helpers, util)
-from cloudinit.config import cc_mcollective
-from cloudinit.sources import DataSourceNoCloud
-
-from cloudinit.tests import helpers as t_help
-
-import configobj
import logging
import os
import shutil
import tempfile
from io import BytesIO
+import configobj
+
+from cloudinit import util
+from cloudinit.config import cc_mcollective
+from tests.unittests import helpers as t_help
+from tests.unittests.util import get_cloud
+
LOG = logging.getLogger(__name__)
@@ -48,108 +47,112 @@ class TestConfig(t_help.FilesystemMockingTestCase):
self.addCleanup(shutil.rmtree, self.tmp)
# "./": make os.path.join behave correctly with abs path as second arg
self.server_cfg = os.path.join(
- self.tmp, "./" + cc_mcollective.SERVER_CFG)
+ self.tmp, "./" + cc_mcollective.SERVER_CFG
+ )
self.pubcert_file = os.path.join(
- self.tmp, "./" + cc_mcollective.PUBCERT_FILE)
+ self.tmp, "./" + cc_mcollective.PUBCERT_FILE
+ )
self.pricert_file = os.path.join(
- self.tmp, self.tmp, "./" + cc_mcollective.PRICERT_FILE)
+ self.tmp, self.tmp, "./" + cc_mcollective.PRICERT_FILE
+ )
def test_basic_config(self):
cfg = {
- 'mcollective': {
- 'conf': {
- 'loglevel': 'debug',
- 'connector': 'rabbitmq',
- 'logfile': '/var/log/mcollective.log',
- 'ttl': '4294957',
- 'collectives': 'mcollective',
- 'main_collective': 'mcollective',
- 'securityprovider': 'psk',
- 'daemonize': '1',
- 'factsource': 'yaml',
- 'direct_addressing': '1',
- 'plugin.psk': 'unset',
- 'libdir': '/usr/share/mcollective/plugins',
- 'identity': '1',
+ "mcollective": {
+ "conf": {
+ "loglevel": "debug",
+ "connector": "rabbitmq",
+ "logfile": "/var/log/mcollective.log",
+ "ttl": "4294957",
+ "collectives": "mcollective",
+ "main_collective": "mcollective",
+ "securityprovider": "psk",
+ "daemonize": "1",
+ "factsource": "yaml",
+ "direct_addressing": "1",
+ "plugin.psk": "unset",
+ "libdir": "/usr/share/mcollective/plugins",
+ "identity": "1",
},
},
}
- expected = cfg['mcollective']['conf']
+ expected = cfg["mcollective"]["conf"]
self.patchUtils(self.tmp)
- cc_mcollective.configure(cfg['mcollective']['conf'])
+ cc_mcollective.configure(cfg["mcollective"]["conf"])
contents = util.load_file(cc_mcollective.SERVER_CFG, decode=False)
contents = configobj.ConfigObj(BytesIO(contents))
self.assertEqual(expected, dict(contents))
def test_existing_config_is_saved(self):
- cfg = {'loglevel': 'warn'}
+ cfg = {"loglevel": "warn"}
util.write_file(self.server_cfg, STOCK_CONFIG)
cc_mcollective.configure(config=cfg, server_cfg=self.server_cfg)
self.assertTrue(os.path.exists(self.server_cfg))
self.assertTrue(os.path.exists(self.server_cfg + ".old"))
- self.assertEqual(util.load_file(self.server_cfg + ".old"),
- STOCK_CONFIG)
+ self.assertEqual(
+ util.load_file(self.server_cfg + ".old"), STOCK_CONFIG
+ )
def test_existing_updated(self):
- cfg = {'loglevel': 'warn'}
+ cfg = {"loglevel": "warn"}
util.write_file(self.server_cfg, STOCK_CONFIG)
cc_mcollective.configure(config=cfg, server_cfg=self.server_cfg)
cfgobj = configobj.ConfigObj(self.server_cfg)
- self.assertEqual(cfg['loglevel'], cfgobj['loglevel'])
+ self.assertEqual(cfg["loglevel"], cfgobj["loglevel"])
def test_certificats_written(self):
# check public-cert and private-cert keys in config get written
- cfg = {'loglevel': 'debug',
- 'public-cert': "this is my public-certificate",
- 'private-cert': "secret private certificate"}
+ cfg = {
+ "loglevel": "debug",
+ "public-cert": "this is my public-certificate",
+ "private-cert": "secret private certificate",
+ }
cc_mcollective.configure(
- config=cfg, server_cfg=self.server_cfg,
- pricert_file=self.pricert_file, pubcert_file=self.pubcert_file)
+ config=cfg,
+ server_cfg=self.server_cfg,
+ pricert_file=self.pricert_file,
+ pubcert_file=self.pubcert_file,
+ )
found = configobj.ConfigObj(self.server_cfg)
# make sure these didnt get written in
- self.assertFalse('public-cert' in found)
- self.assertFalse('private-cert' in found)
+ self.assertFalse("public-cert" in found)
+ self.assertFalse("private-cert" in found)
# these need updating to the specified paths
- self.assertEqual(found['plugin.ssl_server_public'], self.pubcert_file)
- self.assertEqual(found['plugin.ssl_server_private'], self.pricert_file)
+ self.assertEqual(found["plugin.ssl_server_public"], self.pubcert_file)
+ self.assertEqual(found["plugin.ssl_server_private"], self.pricert_file)
# and the security provider should be ssl
- self.assertEqual(found['securityprovider'], 'ssl')
+ self.assertEqual(found["securityprovider"], "ssl")
self.assertEqual(
- util.load_file(self.pricert_file), cfg['private-cert'])
- self.assertEqual(
- util.load_file(self.pubcert_file), cfg['public-cert'])
+ util.load_file(self.pricert_file), cfg["private-cert"]
+ )
+ self.assertEqual(util.load_file(self.pubcert_file), cfg["public-cert"])
class TestHandler(t_help.TestCase):
- def _get_cloud(self, distro):
- cls = distros.fetch(distro)
- paths = helpers.Paths({})
- d = cls(distro, {}, paths)
- ds = DataSourceNoCloud.DataSourceNoCloud({}, d, paths)
- cc = cloud.Cloud(ds, paths, {}, d, None)
- return cc
-
@t_help.mock.patch("cloudinit.config.cc_mcollective.subp")
@t_help.mock.patch("cloudinit.config.cc_mcollective.util")
def test_mcollective_install(self, mock_util, mock_subp):
- cc = self._get_cloud('ubuntu')
+ cc = get_cloud()
cc.distro = t_help.mock.MagicMock()
mock_util.load_file.return_value = b""
- mycfg = {'mcollective': {'conf': {'loglevel': 'debug'}}}
- cc_mcollective.handle('cc_mcollective', mycfg, cc, LOG, [])
+ mycfg = {"mcollective": {"conf": {"loglevel": "debug"}}}
+ cc_mcollective.handle("cc_mcollective", mycfg, cc, LOG, [])
self.assertTrue(cc.distro.install_packages.called)
install_pkg = cc.distro.install_packages.call_args_list[0][0][0]
- self.assertEqual(install_pkg, ('mcollective',))
+ self.assertEqual(install_pkg, ("mcollective",))
self.assertTrue(mock_subp.subp.called)
- self.assertEqual(mock_subp.subp.call_args_list[0][0][0],
- ['service', 'mcollective', 'restart'])
+ self.assertEqual(
+ mock_subp.subp.call_args_list[0][0][0],
+ ["service", "mcollective", "restart"],
+ )
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_mounts.py b/tests/unittests/config/test_cc_mounts.py
new file mode 100644
index 00000000..084faacd
--- /dev/null
+++ b/tests/unittests/config/test_cc_mounts.py
@@ -0,0 +1,522 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import os.path
+from unittest import mock
+
+import pytest
+
+from cloudinit.config import cc_mounts
+from cloudinit.config.cc_mounts import create_swapfile
+from cloudinit.subp import ProcessExecutionError
+from tests.unittests import helpers as test_helpers
+
+M_PATH = "cloudinit.config.cc_mounts."
+
+
+class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase):
+ def setUp(self):
+ super(TestSanitizeDevname, self).setUp()
+ self.new_root = self.tmp_dir()
+ self.patchOS(self.new_root)
+
+ def _touch(self, path):
+ path = os.path.join(self.new_root, path.lstrip("/"))
+ basedir = os.path.dirname(path)
+ if not os.path.exists(basedir):
+ os.makedirs(basedir)
+ open(path, "a").close()
+
+ def _makedirs(self, directory):
+ directory = os.path.join(self.new_root, directory.lstrip("/"))
+ if not os.path.exists(directory):
+ os.makedirs(directory)
+
+ def mock_existence_of_disk(self, disk_path):
+ self._touch(disk_path)
+ self._makedirs(os.path.join("/sys/block", disk_path.split("/")[-1]))
+
+ def mock_existence_of_partition(self, disk_path, partition_number):
+ self.mock_existence_of_disk(disk_path)
+ self._touch(disk_path + str(partition_number))
+ disk_name = disk_path.split("/")[-1]
+ self._makedirs(
+ os.path.join(
+ "/sys/block", disk_name, disk_name + str(partition_number)
+ )
+ )
+
+ def test_existent_full_disk_path_is_returned(self):
+ disk_path = "/dev/sda"
+ self.mock_existence_of_disk(disk_path)
+ self.assertEqual(
+ disk_path,
+ cc_mounts.sanitize_devname(disk_path, lambda x: None, mock.Mock()),
+ )
+
+ def test_existent_disk_name_returns_full_path(self):
+ disk_name = "sda"
+ disk_path = "/dev/" + disk_name
+ self.mock_existence_of_disk(disk_path)
+ self.assertEqual(
+ disk_path,
+ cc_mounts.sanitize_devname(disk_name, lambda x: None, mock.Mock()),
+ )
+
+ def test_existent_meta_disk_is_returned(self):
+ actual_disk_path = "/dev/sda"
+ self.mock_existence_of_disk(actual_disk_path)
+ self.assertEqual(
+ actual_disk_path,
+ cc_mounts.sanitize_devname(
+ "ephemeral0", lambda x: actual_disk_path, mock.Mock()
+ ),
+ )
+
+ def test_existent_meta_partition_is_returned(self):
+ disk_name, partition_part = "/dev/sda", "1"
+ actual_partition_path = disk_name + partition_part
+ self.mock_existence_of_partition(disk_name, partition_part)
+ self.assertEqual(
+ actual_partition_path,
+ cc_mounts.sanitize_devname(
+ "ephemeral0.1", lambda x: disk_name, mock.Mock()
+ ),
+ )
+
+ def test_existent_meta_partition_with_p_is_returned(self):
+ disk_name, partition_part = "/dev/sda", "p1"
+ actual_partition_path = disk_name + partition_part
+ self.mock_existence_of_partition(disk_name, partition_part)
+ self.assertEqual(
+ actual_partition_path,
+ cc_mounts.sanitize_devname(
+ "ephemeral0.1", lambda x: disk_name, mock.Mock()
+ ),
+ )
+
+ def test_first_partition_returned_if_existent_disk_is_partitioned(self):
+ disk_name, partition_part = "/dev/sda", "1"
+ actual_partition_path = disk_name + partition_part
+ self.mock_existence_of_partition(disk_name, partition_part)
+ self.assertEqual(
+ actual_partition_path,
+ cc_mounts.sanitize_devname(
+ "ephemeral0", lambda x: disk_name, mock.Mock()
+ ),
+ )
+
+ def test_nth_partition_returned_if_requested(self):
+ disk_name, partition_part = "/dev/sda", "3"
+ actual_partition_path = disk_name + partition_part
+ self.mock_existence_of_partition(disk_name, partition_part)
+ self.assertEqual(
+ actual_partition_path,
+ cc_mounts.sanitize_devname(
+ "ephemeral0.3", lambda x: disk_name, mock.Mock()
+ ),
+ )
+
+ def test_transformer_returning_none_returns_none(self):
+ self.assertIsNone(
+ cc_mounts.sanitize_devname(
+ "ephemeral0", lambda x: None, mock.Mock()
+ )
+ )
+
+ def test_missing_device_returns_none(self):
+ self.assertIsNone(
+ cc_mounts.sanitize_devname("/dev/sda", None, mock.Mock())
+ )
+
+ def test_missing_sys_returns_none(self):
+ disk_path = "/dev/sda"
+ self._makedirs(disk_path)
+ self.assertIsNone(
+ cc_mounts.sanitize_devname(disk_path, None, mock.Mock())
+ )
+
+ def test_existent_disk_but_missing_partition_returns_none(self):
+ disk_path = "/dev/sda"
+ self.mock_existence_of_disk(disk_path)
+ self.assertIsNone(
+ cc_mounts.sanitize_devname(
+ "ephemeral0.1", lambda x: disk_path, mock.Mock()
+ )
+ )
+
+ def test_network_device_returns_network_device(self):
+ disk_path = "netdevice:/path"
+ self.assertEqual(
+ disk_path, cc_mounts.sanitize_devname(disk_path, None, mock.Mock())
+ )
+
+ def test_device_aliases_remapping(self):
+ disk_path = "/dev/sda"
+ self.mock_existence_of_disk(disk_path)
+ self.assertEqual(
+ disk_path,
+ cc_mounts.sanitize_devname(
+ "mydata", lambda x: None, mock.Mock(), {"mydata": disk_path}
+ ),
+ )
+
+
+class TestSwapFileCreation(test_helpers.FilesystemMockingTestCase):
+ def setUp(self):
+ super(TestSwapFileCreation, self).setUp()
+ self.new_root = self.tmp_dir()
+ self.patchOS(self.new_root)
+
+ self.fstab_path = os.path.join(self.new_root, "etc/fstab")
+ self.swap_path = os.path.join(self.new_root, "swap.img")
+ self._makedirs("/etc")
+
+ self.add_patch(
+ "cloudinit.config.cc_mounts.FSTAB_PATH",
+ "mock_fstab_path",
+ self.fstab_path,
+ autospec=False,
+ )
+
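+ # Capture subp calls so the tests can assert on the exact commands run.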
+ self.add_patch("cloudinit.config.cc_mounts.subp.subp", "m_subp_subp")
+
+ self.add_patch(
+ "cloudinit.config.cc_mounts.util.mounts",
+ "mock_util_mounts",
+ return_value={
+ "/dev/sda1": {
+ "fstype": "ext4",
+ "mountpoint": "/",
+ "opts": "rw,relatime,discard",
+ }
+ },
+ )
+
+ self.mock_cloud = mock.Mock()
+ self.mock_log = mock.Mock()
+ self.mock_cloud.device_name_to_device = self.device_name_to_device
+
+ self.cc = {
+ "swap": {
+ "filename": self.swap_path,
+ "size": "512",
+ "maxsize": "512",
+ }
+ }
+
+ def _makedirs(self, directory):
+ directory = os.path.join(self.new_root, directory.lstrip("/"))
+ if not os.path.exists(directory):
+ os.makedirs(directory)
+
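+ # Stand-in for cloud.device_name_to_device: resolve the name "swap" to
+ # the test swap file and anything else to None.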
+ def device_name_to_device(self, path):
+ if path == "swap":
+ return self.swap_path
+ else:
+ dev = None
+
+ return dev
+
+ @mock.patch("cloudinit.util.get_mount_info")
+ @mock.patch("cloudinit.util.kernel_version")
+ def test_swap_creation_method_fallocate_on_xfs(
+ self, m_kernel_version, m_get_mount_info
+ ):
+ m_kernel_version.return_value = (4, 20)
+ m_get_mount_info.return_value = ["", "xfs"]
+
+ cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, [])
+ self.m_subp_subp.assert_has_calls(
+ [
+ mock.call(
+ ["fallocate", "-l", "0M", self.swap_path], capture=True
+ ),
+ mock.call(["mkswap", self.swap_path]),
+ mock.call(["swapon", "-a"]),
+ ]
+ )
+
+ @mock.patch("cloudinit.util.get_mount_info")
+ @mock.patch("cloudinit.util.kernel_version")
+ def test_swap_creation_method_xfs(
+ self, m_kernel_version, m_get_mount_info
+ ):
+ m_kernel_version.return_value = (3, 18)
+ m_get_mount_info.return_value = ["", "xfs"]
+
+ cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, [])
+ self.m_subp_subp.assert_has_calls(
+ [
+ mock.call(
+ [
+ "dd",
+ "if=/dev/zero",
+ "of=" + self.swap_path,
+ "bs=1M",
+ "count=0",
+ ],
+ capture=True,
+ ),
+ mock.call(["mkswap", self.swap_path]),
+ mock.call(["swapon", "-a"]),
+ ]
+ )
+
+ @mock.patch("cloudinit.util.get_mount_info")
+ @mock.patch("cloudinit.util.kernel_version")
+ def test_swap_creation_method_btrfs(
+ self, m_kernel_version, m_get_mount_info
+ ):
+ m_kernel_version.return_value = (4, 20)
+ m_get_mount_info.return_value = ["", "btrfs"]
+
+ cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, [])
+ self.m_subp_subp.assert_has_calls(
+ [
+ mock.call(
+ [
+ "dd",
+ "if=/dev/zero",
+ "of=" + self.swap_path,
+ "bs=1M",
+ "count=0",
+ ],
+ capture=True,
+ ),
+ mock.call(["mkswap", self.swap_path]),
+ mock.call(["swapon", "-a"]),
+ ]
+ )
+
+ @mock.patch("cloudinit.util.get_mount_info")
+ @mock.patch("cloudinit.util.kernel_version")
+ def test_swap_creation_method_ext4(
+ self, m_kernel_version, m_get_mount_info
+ ):
+ m_kernel_version.return_value = (5, 14)
+ m_get_mount_info.return_value = ["", "ext4"]
+
+ cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, [])
+ self.m_subp_subp.assert_has_calls(
+ [
+ mock.call(
+ ["fallocate", "-l", "0M", self.swap_path], capture=True
+ ),
+ mock.call(["mkswap", self.swap_path]),
+ mock.call(["swapon", "-a"]),
+ ]
+ )
+
+
+class TestFstabHandling(test_helpers.FilesystemMockingTestCase):
+
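+ # Device returned for the "swap" alias, simulating a swap partition
+ # discovered by the datasource.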
+ swap_path = "/dev/sdb1"
+
+ def setUp(self):
+ super(TestFstabHandling, self).setUp()
+ self.new_root = self.tmp_dir()
+ self.patchOS(self.new_root)
+
+ self.fstab_path = os.path.join(self.new_root, "etc/fstab")
+ self._makedirs("/etc")
+
+ self.add_patch(
+ "cloudinit.config.cc_mounts.FSTAB_PATH",
+ "mock_fstab_path",
+ self.fstab_path,
+ autospec=False,
+ )
+
+ self.add_patch(
+ "cloudinit.config.cc_mounts._is_block_device",
+ "mock_is_block_device",
+ return_value=True,
+ )
+
+ self.add_patch("cloudinit.config.cc_mounts.subp.subp", "m_subp_subp")
+
+ self.add_patch(
+ "cloudinit.config.cc_mounts.util.mounts",
+ "mock_util_mounts",
+ return_value={
+ "/dev/sda1": {
+ "fstype": "ext4",
+ "mountpoint": "/",
+ "opts": "rw,relatime,discard",
+ }
+ },
+ )
+
+ self.mock_cloud = mock.Mock()
+ self.mock_log = mock.Mock()
+ self.mock_cloud.device_name_to_device = self.device_name_to_device
+
+ def _makedirs(self, directory):
+ directory = os.path.join(self.new_root, directory.lstrip("/"))
+ if not os.path.exists(directory):
+ os.makedirs(directory)
+
+ def device_name_to_device(self, path):
+ if path == "swap":
+ return self.swap_path
+ else:
+ dev = None
+
+ return dev
+
+ def test_no_fstab(self):
+ """Handle images which do not include an fstab."""
+ self.assertFalse(os.path.exists(cc_mounts.FSTAB_PATH))
+ fstab_expected_content = (
+ "%s\tnone\tswap\tsw,comment=cloudconfig\t0\t0\n"
+ % (self.swap_path,)
+ )
+ cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, [])
+ with open(cc_mounts.FSTAB_PATH, "r") as fd:
+ fstab_new_content = fd.read()
+ self.assertEqual(fstab_expected_content, fstab_new_content)
+
+ def test_swap_integrity(self):
+ """Ensure that the swap file is correctly created and can
+ swapon successfully. Fixing the corner case of:
+ kernel: swapon: swapfile has holes"""
+
+ fstab = "/swap.img swap swap defaults 0 0\n"
+
+ with open(cc_mounts.FSTAB_PATH, "w") as fd:
+ fd.write(fstab)
+ cc = {"swap": ["filename: /swap.img", "size: 512", "maxsize: 512"]}
+ cc_mounts.handle(None, cc, self.mock_cloud, self.mock_log, [])
+
+ def test_fstab_no_swap_device(self):
+ """Ensure that cloud-init adds a discovered swap partition
+ to /etc/fstab."""
+
+ fstab_original_content = ""
+ fstab_expected_content = (
+ "%s\tnone\tswap\tsw,comment=cloudconfig\t0\t0\n"
+ % (self.swap_path,)
+ )
+
+ with open(cc_mounts.FSTAB_PATH, "w") as fd:
+ fd.write(fstab_original_content)
+
+ cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, [])
+
+ with open(cc_mounts.FSTAB_PATH, "r") as fd:
+ fstab_new_content = fd.read()
+ self.assertEqual(fstab_expected_content, fstab_new_content)
+
+ def test_fstab_same_swap_device_already_configured(self):
+ """Ensure that cloud-init will not add a swap device if the same
+ device already exists in /etc/fstab."""
+
+ fstab_original_content = "%s swap swap defaults 0 0\n" % (
+ self.swap_path,
+ )
+ fstab_expected_content = fstab_original_content
+
+ with open(cc_mounts.FSTAB_PATH, "w") as fd:
+ fd.write(fstab_original_content)
+
+ cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, [])
+
+ with open(cc_mounts.FSTAB_PATH, "r") as fd:
+ fstab_new_content = fd.read()
+ self.assertEqual(fstab_expected_content, fstab_new_content)
+
+ def test_fstab_alternate_swap_device_already_configured(self):
+ """Ensure that cloud-init will add a discovered swap device to
+ /etc/fstab even when there exists a swap definition on another
+ device."""
+
+ fstab_original_content = "/dev/sdc1 swap swap defaults 0 0\n"
+ fstab_expected_content = (
+ fstab_original_content
+ + "%s\tnone\tswap\tsw,comment=cloudconfig\t0\t0\n"
+ % (self.swap_path,)
+ )
+
+ with open(cc_mounts.FSTAB_PATH, "w") as fd:
+ fd.write(fstab_original_content)
+
+ cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, [])
+
+ with open(cc_mounts.FSTAB_PATH, "r") as fd:
+ fstab_new_content = fd.read()
+ self.assertEqual(fstab_expected_content, fstab_new_content)
+
+ def test_no_change_fstab_sets_needs_mount_all(self):
+ """verify unchanged fstab entries are mounted if not call mount -a"""
+ fstab_original_content = (
+ "LABEL=cloudimg-rootfs / ext4 defaults 0 0\n"
+ "LABEL=UEFI /boot/efi vfat defaults 0 0\n"
+ "/dev/vdb /mnt auto defaults,noexec,comment=cloudconfig 0 2\n"
+ )
+ fstab_expected_content = fstab_original_content
+ cc = {"mounts": [["/dev/vdb", "/mnt", "auto", "defaults,noexec"]]}
+ with open(cc_mounts.FSTAB_PATH, "w") as fd:
+ fd.write(fstab_original_content)
+ with open(cc_mounts.FSTAB_PATH, "r") as fd:
+ fstab_new_content = fd.read()
+ self.assertEqual(fstab_expected_content, fstab_new_content)
+ cc_mounts.handle(None, cc, self.mock_cloud, self.mock_log, [])
+ self.m_subp_subp.assert_has_calls(
+ [
+ mock.call(["mount", "-a"]),
+ mock.call(["systemctl", "daemon-reload"]),
+ ]
+ )
+
+
+class TestCreateSwapfile:
+ @pytest.mark.parametrize("fstype", ("xfs", "btrfs", "ext4", "other"))
+ @mock.patch(M_PATH + "util.get_mount_info")
+ @mock.patch(M_PATH + "subp.subp")
+ def test_happy_path(self, m_subp, m_get_mount_info, fstype, tmpdir):
+ swap_file = tmpdir.join("swap-file")
+ fname = str(swap_file)
+
+ # Some of the calls to subp.subp should create the swap file; this
+ # roughly approximates that
+ m_subp.side_effect = lambda *args, **kwargs: swap_file.write("")
+
+ m_get_mount_info.return_value = (mock.ANY, fstype)
+
+ create_swapfile(fname, "")
+ assert mock.call(["mkswap", fname]) in m_subp.call_args_list
+
+ @mock.patch(M_PATH + "util.get_mount_info")
+ @mock.patch(M_PATH + "subp.subp")
+ def test_fallback_from_fallocate_to_dd(
+ self, m_subp, m_get_mount_info, caplog, tmpdir
+ ):
+ swap_file = tmpdir.join("swap-file")
+ fname = str(swap_file)
+
+ def subp_side_effect(cmd, *args, **kwargs):
+ # Mock fallocate failing, to initiate fallback
+ if cmd[0] == "fallocate":
+ raise ProcessExecutionError()
+
+ m_subp.side_effect = subp_side_effect
+ # Use ext4 so both fallocate and dd are valid swap creation methods
+ m_get_mount_info.return_value = (mock.ANY, "ext4")
+
+ create_swapfile(fname, "")
+
+ cmds = [args[0][0] for args, _kwargs in m_subp.call_args_list]
+ assert "fallocate" in cmds, "fallocate was not called"
+ assert "dd" in cmds, "fallocate failure did not fallback to dd"
+
+ assert cmds.index("dd") > cmds.index(
+ "fallocate"
+ ), "dd ran before fallocate"
+
+ assert mock.call(["mkswap", fname]) in m_subp.call_args_list
+
+ msg = "fallocate swap creation failed, will attempt with dd"
+ assert msg in caplog.text
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/config/test_cc_ntp.py
index 6b9c8377..fba141aa 100644
--- a/tests/unittests/test_handler/test_handler_ntp.py
+++ b/tests/unittests/config/test_cc_ntp.py
@@ -1,17 +1,19 @@
# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.config import cc_ntp
-from cloudinit.sources import DataSourceNone
-from cloudinit import (distros, helpers, cloud, util)
-
-from cloudinit.tests.helpers import (
- CiTestCase, FilesystemMockingTestCase, mock, skipUnlessJsonSchema)
-
-
import copy
import os
-from os.path import dirname
import shutil
+from functools import partial
+from os.path import dirname
+
+from cloudinit import helpers, util
+from cloudinit.config import cc_ntp
+from tests.unittests.helpers import (
+ CiTestCase,
+ FilesystemMockingTestCase,
+ mock,
+ skipUnlessJsonSchema,
+)
+from tests.unittests.util import get_cloud
NTP_TEMPLATE = """\
## template: jinja
@@ -35,25 +37,18 @@ class TestNtp(FilesystemMockingTestCase):
def setUp(self):
super(TestNtp, self).setUp()
self.new_root = self.tmp_dir()
- self.add_patch('cloudinit.util.system_is_snappy', 'm_snappy')
+ self.add_patch("cloudinit.util.system_is_snappy", "m_snappy")
self.m_snappy.return_value = False
- self.add_patch('cloudinit.util.system_info', 'm_sysinfo')
- self.m_sysinfo.return_value = {'dist': ('Distro', '99.1', 'Codename')}
-
- def _get_cloud(self, distro, sys_cfg=None):
- self.new_root = self.reRoot(root=self.new_root)
- paths = helpers.Paths({'templates_dir': self.new_root})
- cls = distros.fetch(distro)
- if not sys_cfg:
- sys_cfg = {}
- mydist = cls(distro, sys_cfg, paths)
- myds = DataSourceNone.DataSourceNone(sys_cfg, mydist, paths)
- return cloud.Cloud(myds, paths, sys_cfg, mydist, None)
+ self.new_root = self.reRoot()
+ self._get_cloud = partial(
+ get_cloud, paths=helpers.Paths({"templates_dir": self.new_root})
+ )
def _get_template_path(self, template_name, distro, basepath=None):
# ntp.conf.{distro} -> ntp.conf.debian.tmpl
- template_fn = '{0}.tmpl'.format(
- template_name.replace('{distro}', distro))
+ template_fn = "{0}.tmpl".format(
+ template_name.replace("{distro}", distro)
+ )
if not basepath:
basepath = self.new_root
path = os.path.join(basepath, template_fn)
@@ -62,25 +57,25 @@ class TestNtp(FilesystemMockingTestCase):
def _generate_template(self, template=None):
if not template:
template = NTP_TEMPLATE
- confpath = os.path.join(self.new_root, 'client.conf')
- template_fn = os.path.join(self.new_root, 'client.conf.tmpl')
+ confpath = os.path.join(self.new_root, "client.conf")
+ template_fn = os.path.join(self.new_root, "client.conf.tmpl")
util.write_file(template_fn, content=template)
return (confpath, template_fn)
def _mock_ntp_client_config(self, client=None, distro=None):
if not client:
- client = 'ntp'
+ client = "ntp"
if not distro:
- distro = 'ubuntu'
+ distro = "ubuntu"
dcfg = cc_ntp.distro_ntp_client_configs(distro)
- if client == 'systemd-timesyncd':
+ if client == "systemd-timesyncd":
template = TIMESYNCD_TEMPLATE
else:
template = NTP_TEMPLATE
(confpath, _template_fn) = self._generate_template(template=template)
ntpconfig = copy.deepcopy(dcfg[client])
- ntpconfig['confpath'] = confpath
- ntpconfig['template_name'] = os.path.basename(confpath)
+ ntpconfig["confpath"] = confpath
+ ntpconfig["template_name"] = os.path.basename(confpath)
return ntpconfig
@mock.patch("cloudinit.config.cc_ntp.subp")
@@ -88,19 +83,21 @@ class TestNtp(FilesystemMockingTestCase):
"""ntp_install_client runs install_func when check_exe is absent."""
mock_subp.which.return_value = None # check_exe not found.
install_func = mock.MagicMock()
- cc_ntp.install_ntp_client(install_func,
- packages=['ntpx'], check_exe='ntpdx')
- mock_subp.which.assert_called_with('ntpdx')
- install_func.assert_called_once_with(['ntpx'])
+ cc_ntp.install_ntp_client(
+ install_func, packages=["ntpx"], check_exe="ntpdx"
+ )
+ mock_subp.which.assert_called_with("ntpdx")
+ install_func.assert_called_once_with(["ntpx"])
@mock.patch("cloudinit.config.cc_ntp.subp")
def test_ntp_install_not_needed(self, mock_subp):
"""ntp_install_client doesn't install when check_exe is found."""
- client = 'chrony'
+ client = "chrony"
mock_subp.which.return_value = [client] # check_exe found.
install_func = mock.MagicMock()
- cc_ntp.install_ntp_client(install_func, packages=[client],
- check_exe=client)
+ cc_ntp.install_ntp_client(
+ install_func, packages=[client], check_exe=client
+ )
install_func.assert_not_called()
@mock.patch("cloudinit.config.cc_ntp.subp")
@@ -108,26 +105,11 @@ class TestNtp(FilesystemMockingTestCase):
"""ntp_install_client runs install_func with empty list"""
mock_subp.which.return_value = None # check_exe not found
install_func = mock.MagicMock()
- cc_ntp.install_ntp_client(install_func, packages=[],
- check_exe='timesyncd')
+ cc_ntp.install_ntp_client(
+ install_func, packages=[], check_exe="timesyncd"
+ )
install_func.assert_called_once_with([])
- @mock.patch("cloudinit.config.cc_ntp.subp")
- def test_reload_ntp_defaults(self, mock_subp):
- """Test service is restarted/reloaded (defaults)"""
- service = 'ntp_service_name'
- cmd = ['service', service, 'restart']
- cc_ntp.reload_ntp(service)
- mock_subp.subp.assert_called_with(cmd, capture=True)
-
- @mock.patch("cloudinit.config.cc_ntp.subp")
- def test_reload_ntp_systemd(self, mock_subp):
- """Test service is restarted/reloaded (systemd)"""
- service = 'ntp_service_name'
- cc_ntp.reload_ntp(service, systemd=True)
- cmd = ['systemctl', 'reload-or-restart', service]
- mock_subp.subp.assert_called_with(cmd, capture=True)
-
def test_ntp_rename_ntp_conf(self):
"""When NTP_CONF exists, rename_ntp moves it."""
ntpconf = self.tmp_path("ntp.conf", self.new_root)
@@ -147,18 +129,22 @@ class TestNtp(FilesystemMockingTestCase):
def test_write_ntp_config_template_uses_ntp_conf_distro_no_servers(self):
"""write_ntp_config_template reads from $client.conf.distro.tmpl"""
servers = []
- pools = ['10.0.0.1', '10.0.0.2']
+ pools = ["10.0.0.1", "10.0.0.2"]
(confpath, template_fn) = self._generate_template()
- mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR'
+ mock_path = "cloudinit.config.cc_ntp.temp_utils._TMPDIR"
with mock.patch(mock_path, self.new_root):
- cc_ntp.write_ntp_config_template('ubuntu',
- servers=servers, pools=pools,
- path=confpath,
- template_fn=template_fn,
- template=None)
+ cc_ntp.write_ntp_config_template(
+ "ubuntu",
+ servers=servers,
+ pools=pools,
+ path=confpath,
+ template_fn=template_fn,
+ template=None,
+ )
self.assertEqual(
"servers []\npools ['10.0.0.1', '10.0.0.2']\n",
- util.load_file(confpath))
+ util.load_file(confpath),
+ )
def test_write_ntp_config_template_defaults_pools_w_empty_lists(self):
"""write_ntp_config_template defaults pools servers upon empty config.
@@ -166,20 +152,23 @@ class TestNtp(FilesystemMockingTestCase):
When both pools and servers are empty, default NR_POOL_SERVERS get
configured.
"""
- distro = 'ubuntu'
+ distro = "ubuntu"
pools = cc_ntp.generate_server_names(distro)
servers = []
(confpath, template_fn) = self._generate_template()
- mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR'
+ mock_path = "cloudinit.config.cc_ntp.temp_utils._TMPDIR"
with mock.patch(mock_path, self.new_root):
- cc_ntp.write_ntp_config_template(distro,
- servers=servers, pools=pools,
- path=confpath,
- template_fn=template_fn,
- template=None)
+ cc_ntp.write_ntp_config_template(
+ distro,
+ servers=servers,
+ pools=pools,
+ path=confpath,
+ template_fn=template_fn,
+ template=None,
+ )
self.assertEqual(
- "servers []\npools {0}\n".format(pools),
- util.load_file(confpath))
+ "servers []\npools {0}\n".format(pools), util.load_file(confpath)
+ )
def test_defaults_pools_empty_lists_sles(self):
"""write_ntp_config_template defaults opensuse pools upon empty config.
@@ -187,39 +176,50 @@ class TestNtp(FilesystemMockingTestCase):
When both pools and servers are empty, default NR_POOL_SERVERS get
configured.
"""
- distro = 'sles'
+ distro = "sles"
default_pools = cc_ntp.generate_server_names(distro)
(confpath, template_fn) = self._generate_template()
- cc_ntp.write_ntp_config_template(distro,
- servers=[], pools=[],
- path=confpath,
- template_fn=template_fn,
- template=None)
+ cc_ntp.write_ntp_config_template(
+ distro,
+ servers=[],
+ pools=[],
+ path=confpath,
+ template_fn=template_fn,
+ template=None,
+ )
for pool in default_pools:
- self.assertIn('opensuse', pool)
+ self.assertIn("opensuse", pool)
self.assertEqual(
"servers []\npools {0}\n".format(default_pools),
- util.load_file(confpath))
+ util.load_file(confpath),
+ )
self.assertIn(
"Adding distro default ntp pool servers: {0}".format(
- ",".join(default_pools)),
- self.logs.getvalue())
+ ",".join(default_pools)
+ ),
+ self.logs.getvalue(),
+ )
def test_timesyncd_template(self):
"""Test timesycnd template is correct"""
- pools = ['0.mycompany.pool.ntp.org', '3.mycompany.pool.ntp.org']
- servers = ['192.168.23.3', '192.168.23.4']
+ pools = ["0.mycompany.pool.ntp.org", "3.mycompany.pool.ntp.org"]
+ servers = ["192.168.23.3", "192.168.23.4"]
(confpath, template_fn) = self._generate_template(
- template=TIMESYNCD_TEMPLATE)
- cc_ntp.write_ntp_config_template('ubuntu',
- servers=servers, pools=pools,
- path=confpath,
- template_fn=template_fn,
- template=None)
+ template=TIMESYNCD_TEMPLATE
+ )
+ cc_ntp.write_ntp_config_template(
+ "ubuntu",
+ servers=servers,
+ pools=pools,
+ path=confpath,
+ template_fn=template_fn,
+ template=None,
+ )
self.assertEqual(
"[Time]\nNTP=%s %s \n" % (" ".join(servers), " ".join(pools)),
- util.load_file(confpath))
+ util.load_file(confpath),
+ )
def test_distro_ntp_client_configs(self):
"""Test we have updated ntp client configs on different distros"""
@@ -236,55 +236,62 @@ class TestNtp(FilesystemMockingTestCase):
result = cc_ntp.distro_ntp_client_configs(distro)
for client in delta[distro].keys():
for key in delta[distro][client].keys():
- self.assertEqual(delta[distro][client][key],
- result[client][key])
+ self.assertEqual(
+ delta[distro][client][key], result[client][key]
+ )
def _get_expected_pools(self, pools, distro, client):
- if client in ['ntp', 'chrony']:
- if client == 'ntp' and distro == 'alpine':
+ if client in ["ntp", "chrony"]:
+ if client == "ntp" and distro == "alpine":
# NTP for Alpine Linux is Busybox's ntp which does not
# support 'pool' lines in its configuration file.
expected_pools = []
else:
expected_pools = [
- 'pool {0} iburst'.format(pool) for pool in pools]
- elif client == 'systemd-timesyncd':
+ "pool {0} iburst".format(pool) for pool in pools
+ ]
+ elif client == "systemd-timesyncd":
expected_pools = " ".join(pools)
return expected_pools
def _get_expected_servers(self, servers, distro, client):
- if client in ['ntp', 'chrony']:
- if client == 'ntp' and distro == 'alpine':
+ if client in ["ntp", "chrony"]:
+ if client == "ntp" and distro == "alpine":
# NTP for Alpine Linux is Busybox's ntp which only supports
# 'server' lines without iburst option.
expected_servers = [
- 'server {0}'.format(srv) for srv in servers]
+ "server {0}".format(srv) for srv in servers
+ ]
else:
expected_servers = [
- 'server {0} iburst'.format(srv) for srv in servers]
- elif client == 'systemd-timesyncd':
+ "server {0} iburst".format(srv) for srv in servers
+ ]
+ elif client == "systemd-timesyncd":
expected_servers = " ".join(servers)
return expected_servers
def test_ntp_handler_real_distro_ntp_templates(self):
"""Test ntp handler renders the shipped distro ntp client templates."""
- pools = ['0.mycompany.pool.ntp.org', '3.mycompany.pool.ntp.org']
- servers = ['192.168.23.3', '192.168.23.4']
- for client in ['ntp', 'systemd-timesyncd', 'chrony']:
+ pools = ["0.mycompany.pool.ntp.org", "3.mycompany.pool.ntp.org"]
+ servers = ["192.168.23.3", "192.168.23.4"]
+ for client in ["ntp", "systemd-timesyncd", "chrony"]:
for distro in cc_ntp.distros:
distro_cfg = cc_ntp.distro_ntp_client_configs(distro)
ntpclient = distro_cfg[client]
- confpath = (
- os.path.join(self.new_root, ntpclient.get('confpath')[1:]))
- template = ntpclient.get('template_name')
+ confpath = os.path.join(
+ self.new_root, ntpclient.get("confpath")[1:]
+ )
+ template = ntpclient.get("template_name")
# find sourcetree template file
root_dir = (
- dirname(dirname(os.path.realpath(util.__file__))) +
- '/templates')
- source_fn = self._get_template_path(template, distro,
- basepath=root_dir)
+ dirname(dirname(os.path.realpath(util.__file__)))
+ + "/templates"
+ )
+ source_fn = self._get_template_path(
+ template, distro, basepath=root_dir
+ )
template_fn = self._get_template_path(template, distro)
# don't fail if cloud-init doesn't have a template for
# a distro,client pair
@@ -292,64 +299,77 @@ class TestNtp(FilesystemMockingTestCase):
continue
# Create a copy in our tmp_dir
shutil.copy(source_fn, template_fn)
- cc_ntp.write_ntp_config_template(distro, servers=servers,
- pools=pools, path=confpath,
- template_fn=template_fn)
+ cc_ntp.write_ntp_config_template(
+ distro,
+ servers=servers,
+ pools=pools,
+ path=confpath,
+ template_fn=template_fn,
+ )
content = util.load_file(confpath)
- if client in ['ntp', 'chrony']:
+ if client in ["ntp", "chrony"]:
content_lines = content.splitlines()
- expected_servers = self._get_expected_servers(servers,
- distro,
- client)
- print('distro=%s client=%s' % (distro, client))
+ expected_servers = self._get_expected_servers(
+ servers, distro, client
+ )
+ print("distro=%s client=%s" % (distro, client))
for sline in expected_servers:
- self.assertIn(sline, content_lines,
- ('failed to render {0} conf'
- ' for distro:{1}'.format(client,
- distro)))
- expected_pools = self._get_expected_pools(pools, distro,
- client)
+ self.assertIn(
+ sline,
+ content_lines,
+ "failed to render {0} conf for distro:{1}".format(
+ client, distro
+ ),
+ )
+ expected_pools = self._get_expected_pools(
+ pools, distro, client
+ )
if expected_pools != []:
for pline in expected_pools:
- self.assertIn(pline, content_lines,
- ('failed to render {0} conf'
- ' for distro:{1}'.format(client,
- distro)))
- elif client == 'systemd-timesyncd':
- expected_servers = self._get_expected_servers(servers,
- distro,
- client)
- expected_pools = self._get_expected_pools(pools,
- distro,
- client)
+ self.assertIn(
+ pline,
+ content_lines,
+ "failed to render {0} conf"
+ " for distro:{1}".format(client, distro),
+ )
+ elif client == "systemd-timesyncd":
+ expected_servers = self._get_expected_servers(
+ servers, distro, client
+ )
+ expected_pools = self._get_expected_pools(
+ pools, distro, client
+ )
expected_content = (
- "# cloud-init generated file\n" +
- "# See timesyncd.conf(5) for details.\n\n" +
- "[Time]\nNTP=%s %s \n" % (expected_servers,
- expected_pools))
+ "# cloud-init generated file\n"
+ + "# See timesyncd.conf(5) for details.\n\n"
+ + "[Time]\nNTP=%s %s \n"
+ % (expected_servers, expected_pools)
+ )
self.assertEqual(expected_content, content)
def test_no_ntpcfg_does_nothing(self):
"""When no ntp section is defined handler logs a warning and noops."""
- cc_ntp.handle('cc_ntp', {}, None, None, [])
+ cc_ntp.handle("cc_ntp", {}, None, None, [])
self.assertEqual(
- 'DEBUG: Skipping module named cc_ntp, '
- 'not present or disabled by cfg\n',
- self.logs.getvalue())
-
- @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
- def test_ntp_handler_schema_validation_allows_empty_ntp_config(self,
- m_select):
+ "DEBUG: Skipping module named cc_ntp, "
+ "not present or disabled by cfg\n",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.config.cc_ntp.select_ntp_client")
+ def test_ntp_handler_schema_validation_allows_empty_ntp_config(
+ self, m_select
+ ):
"""Ntp schema validation allows for an empty ntp: configuration."""
- valid_empty_configs = [{'ntp': {}}, {'ntp': None}]
+ valid_empty_configs = [{"ntp": {}}, {"ntp": None}]
for valid_empty_config in valid_empty_configs:
for distro in cc_ntp.distros:
mycloud = self._get_cloud(distro)
ntpconfig = self._mock_ntp_client_config(distro=distro)
- confpath = ntpconfig['confpath']
+ confpath = ntpconfig["confpath"]
m_select.return_value = ntpconfig
- cc_ntp.handle('cc_ntp', valid_empty_config, mycloud, None, [])
- if distro == 'alpine':
+ cc_ntp.handle("cc_ntp", valid_empty_config, mycloud, None, [])
+ if distro == "alpine":
# _mock_ntp_client_config call above did not specify a
# client value and so it defaults to "ntp" which on
# Alpine Linux only supports servers and not pools.
@@ -357,213 +377,243 @@ class TestNtp(FilesystemMockingTestCase):
servers = cc_ntp.generate_server_names(mycloud.distro.name)
self.assertEqual(
"servers {0}\npools []\n".format(servers),
- util.load_file(confpath))
+ util.load_file(confpath),
+ )
else:
pools = cc_ntp.generate_server_names(mycloud.distro.name)
self.assertEqual(
"servers []\npools {0}\n".format(pools),
- util.load_file(confpath))
- self.assertNotIn('Invalid config:', self.logs.getvalue())
+ util.load_file(confpath),
+ )
+ self.assertNotIn(
+ "Invalid cloud-config provided:", self.logs.getvalue()
+ )
@skipUnlessJsonSchema()
- @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
- def test_ntp_handler_schema_validation_warns_non_string_item_type(self,
- m_sel):
+ @mock.patch("cloudinit.config.cc_ntp.select_ntp_client")
+ def test_ntp_handler_schema_validation_warns_non_string_item_type(
+ self, m_sel
+ ):
"""Ntp schema validation warns of non-strings in pools or servers.
Schema validation is not strict, so ntp config is still be rendered.
"""
- invalid_config = {'ntp': {'pools': [123], 'servers': ['valid', None]}}
+ invalid_config = {"ntp": {"pools": [123], "servers": ["valid", None]}}
for distro in cc_ntp.distros:
mycloud = self._get_cloud(distro)
ntpconfig = self._mock_ntp_client_config(distro=distro)
- confpath = ntpconfig['confpath']
+ confpath = ntpconfig["confpath"]
m_sel.return_value = ntpconfig
- cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, [])
+ cc_ntp.handle("cc_ntp", invalid_config, mycloud, None, [])
self.assertIn(
- "Invalid config:\nntp.pools.0: 123 is not of type 'string'\n"
- "ntp.servers.1: None is not of type 'string'",
- self.logs.getvalue())
- self.assertEqual("servers ['valid', None]\npools [123]\n",
- util.load_file(confpath))
+ "Invalid cloud-config provided:\nntp.pools.0: 123 is not of"
+ " type 'string'\nntp.servers.1: None is not of type 'string'",
+ self.logs.getvalue(),
+ )
+ self.assertEqual(
+ "servers ['valid', None]\npools [123]\n",
+ util.load_file(confpath),
+ )
@skipUnlessJsonSchema()
- @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
- def test_ntp_handler_schema_validation_warns_of_non_array_type(self,
- m_select):
+ @mock.patch("cloudinit.config.cc_ntp.select_ntp_client")
+ def test_ntp_handler_schema_validation_warns_of_non_array_type(
+ self, m_select
+ ):
"""Ntp schema validation warns of non-array pools or servers types.
Schema validation is not strict, so ntp config is still be rendered.
"""
- invalid_config = {'ntp': {'pools': 123, 'servers': 'non-array'}}
+ invalid_config = {"ntp": {"pools": 123, "servers": "non-array"}}
for distro in cc_ntp.distros:
mycloud = self._get_cloud(distro)
ntpconfig = self._mock_ntp_client_config(distro=distro)
- confpath = ntpconfig['confpath']
+ confpath = ntpconfig["confpath"]
m_select.return_value = ntpconfig
- cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, [])
+ cc_ntp.handle("cc_ntp", invalid_config, mycloud, None, [])
self.assertIn(
- "Invalid config:\nntp.pools: 123 is not of type 'array'\n"
- "ntp.servers: 'non-array' is not of type 'array'",
- self.logs.getvalue())
- self.assertEqual("servers non-array\npools 123\n",
- util.load_file(confpath))
+ "Invalid cloud-config provided:\nntp.pools: 123 is not of type"
+ " 'array'\nntp.servers: 'non-array' is not of type 'array'",
+ self.logs.getvalue(),
+ )
+ self.assertEqual(
+ "servers non-array\npools 123\n", util.load_file(confpath)
+ )
@skipUnlessJsonSchema()
- @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
- def test_ntp_handler_schema_validation_warns_invalid_key_present(self,
- m_select):
+ @mock.patch("cloudinit.config.cc_ntp.select_ntp_client")
+ def test_ntp_handler_schema_validation_warns_invalid_key_present(
+ self, m_select
+ ):
"""Ntp schema validation warns of invalid keys present in ntp config.
Schema validation is not strict, so ntp config is still be rendered.
"""
invalid_config = {
- 'ntp': {'invalidkey': 1, 'pools': ['0.mycompany.pool.ntp.org']}}
+ "ntp": {"invalidkey": 1, "pools": ["0.mycompany.pool.ntp.org"]}
+ }
for distro in cc_ntp.distros:
- if distro != 'alpine':
+ if distro != "alpine":
mycloud = self._get_cloud(distro)
ntpconfig = self._mock_ntp_client_config(distro=distro)
- confpath = ntpconfig['confpath']
+ confpath = ntpconfig["confpath"]
m_select.return_value = ntpconfig
- cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, [])
+ cc_ntp.handle("cc_ntp", invalid_config, mycloud, None, [])
self.assertIn(
- "Invalid config:\nntp: Additional properties are not "
- "allowed ('invalidkey' was unexpected)",
- self.logs.getvalue())
+ "Invalid cloud-config provided:\nntp: Additional"
+ " properties are not allowed ('invalidkey' was"
+ " unexpected)",
+ self.logs.getvalue(),
+ )
self.assertEqual(
"servers []\npools ['0.mycompany.pool.ntp.org']\n",
- util.load_file(confpath))
+ util.load_file(confpath),
+ )
@skipUnlessJsonSchema()
- @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
+ @mock.patch("cloudinit.config.cc_ntp.select_ntp_client")
def test_ntp_handler_schema_validation_warns_of_duplicates(self, m_select):
"""Ntp schema validation warns of duplicates in servers or pools.
Schema validation is not strict, so ntp config is still be rendered.
"""
invalid_config = {
- 'ntp': {'pools': ['0.mypool.org', '0.mypool.org'],
- 'servers': ['10.0.0.1', '10.0.0.1']}}
+ "ntp": {
+ "pools": ["0.mypool.org", "0.mypool.org"],
+ "servers": ["10.0.0.1", "10.0.0.1"],
+ }
+ }
for distro in cc_ntp.distros:
mycloud = self._get_cloud(distro)
ntpconfig = self._mock_ntp_client_config(distro=distro)
- confpath = ntpconfig['confpath']
+ confpath = ntpconfig["confpath"]
m_select.return_value = ntpconfig
- cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, [])
+ cc_ntp.handle("cc_ntp", invalid_config, mycloud, None, [])
self.assertIn(
- "Invalid config:\nntp.pools: ['0.mypool.org', '0.mypool.org']"
- " has non-unique elements\nntp.servers: "
+ "Invalid cloud-config provided:\nntp.pools: ['0.mypool.org',"
+ " '0.mypool.org'] has non-unique elements\nntp.servers: "
"['10.0.0.1', '10.0.0.1'] has non-unique elements",
- self.logs.getvalue())
+ self.logs.getvalue(),
+ )
self.assertEqual(
"servers ['10.0.0.1', '10.0.0.1']\n"
"pools ['0.mypool.org', '0.mypool.org']\n",
- util.load_file(confpath))
+ util.load_file(confpath),
+ )
- @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
+ @mock.patch("cloudinit.config.cc_ntp.select_ntp_client")
def test_ntp_handler_timesyncd(self, m_select):
"""Test ntp handler configures timesyncd"""
- servers = ['192.168.2.1', '192.168.2.2']
- pools = ['0.mypool.org']
- cfg = {'ntp': {'servers': servers, 'pools': pools}}
- client = 'systemd-timesyncd'
+ servers = ["192.168.2.1", "192.168.2.2"]
+ pools = ["0.mypool.org"]
+ cfg = {"ntp": {"servers": servers, "pools": pools}}
+ client = "systemd-timesyncd"
for distro in cc_ntp.distros:
mycloud = self._get_cloud(distro)
- ntpconfig = self._mock_ntp_client_config(distro=distro,
- client=client)
- confpath = ntpconfig['confpath']
+ ntpconfig = self._mock_ntp_client_config(
+ distro=distro, client=client
+ )
+ confpath = ntpconfig["confpath"]
m_select.return_value = ntpconfig
- cc_ntp.handle('cc_ntp', cfg, mycloud, None, [])
+ cc_ntp.handle("cc_ntp", cfg, mycloud, None, [])
self.assertEqual(
"[Time]\nNTP=192.168.2.1 192.168.2.2 0.mypool.org \n",
- util.load_file(confpath))
+ util.load_file(confpath),
+ )
- @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
+ @mock.patch("cloudinit.config.cc_ntp.select_ntp_client")
def test_ntp_handler_enabled_false(self, m_select):
- """Test ntp handler does not run if enabled: false """
- cfg = {'ntp': {'enabled': False}}
+ """Test ntp handler does not run if enabled: false"""
+ cfg = {"ntp": {"enabled": False}}
for distro in cc_ntp.distros:
mycloud = self._get_cloud(distro)
- cc_ntp.handle('notimportant', cfg, mycloud, None, None)
+ cc_ntp.handle("notimportant", cfg, mycloud, None, None)
self.assertEqual(0, m_select.call_count)
+ @mock.patch("cloudinit.distros.subp")
@mock.patch("cloudinit.config.cc_ntp.subp")
- @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
+ @mock.patch("cloudinit.config.cc_ntp.select_ntp_client")
@mock.patch("cloudinit.distros.Distro.uses_systemd")
- def test_ntp_the_whole_package(self, m_sysd, m_select, m_subp):
- """Test enabled config renders template, and restarts service """
- cfg = {'ntp': {'enabled': True}}
+ def test_ntp_the_whole_package(self, m_sysd, m_select, m_subp, m_dsubp):
+ """Test enabled config renders template, and restarts service"""
+ cfg = {"ntp": {"enabled": True}}
for distro in cc_ntp.distros:
mycloud = self._get_cloud(distro)
ntpconfig = self._mock_ntp_client_config(distro=distro)
- confpath = ntpconfig['confpath']
- service_name = ntpconfig['service_name']
+ confpath = ntpconfig["confpath"]
+ service_name = ntpconfig["service_name"]
m_select.return_value = ntpconfig
hosts = cc_ntp.generate_server_names(mycloud.distro.name)
uses_systemd = True
- expected_service_call = ['systemctl', 'reload-or-restart',
- service_name]
+ expected_service_call = [
+ "systemctl",
+ "reload-or-restart",
+ service_name,
+ ]
expected_content = "servers []\npools {0}\n".format(hosts)
- if distro == 'alpine':
+ if distro == "alpine":
uses_systemd = False
- expected_service_call = ['service', service_name, 'restart']
+ expected_service_call = ["rc-service", service_name, "restart"]
# _mock_ntp_client_config call above did not specify a client
# value and so it defaults to "ntp" which on Alpine Linux only
# supports servers and not pools.
expected_content = "servers {0}\npools []\n".format(hosts)
m_sysd.return_value = uses_systemd
- with mock.patch('cloudinit.config.cc_ntp.util') as m_util:
+ with mock.patch("cloudinit.config.cc_ntp.util") as m_util:
# allow use of util.mergemanydict
m_util.mergemanydict.side_effect = util.mergemanydict
# default client is present
m_subp.which.return_value = True
# use the config 'enabled' value
m_util.is_false.return_value = util.is_false(
- cfg['ntp']['enabled'])
- cc_ntp.handle('notimportant', cfg, mycloud, None, None)
- m_subp.subp.assert_called_with(
- expected_service_call, capture=True)
+ cfg["ntp"]["enabled"]
+ )
+ cc_ntp.handle("notimportant", cfg, mycloud, None, None)
+ m_dsubp.subp.assert_called_with(
+ expected_service_call, capture=True
+ )
self.assertEqual(expected_content, util.load_file(confpath))
- def test_opensuse_picks_chrony(self):
+ @mock.patch("cloudinit.util.system_info")
+ def test_opensuse_picks_chrony(self, m_sysinfo):
"""Test opensuse picks chrony or ntp on certain distro versions"""
# < 15.0 => ntp
- self.m_sysinfo.return_value = {'dist':
- ('openSUSE', '13.2', 'Harlequin')}
- mycloud = self._get_cloud('opensuse')
+ m_sysinfo.return_value = {"dist": ("openSUSE", "13.2", "Harlequin")}
+ mycloud = self._get_cloud("opensuse")
expected_client = mycloud.distro.preferred_ntp_clients[0]
- self.assertEqual('ntp', expected_client)
+ self.assertEqual("ntp", expected_client)
# >= 15.0 and not openSUSE => chrony
- self.m_sysinfo.return_value = {'dist':
- ('SLES', '15.0',
- 'SUSE Linux Enterprise Server 15')}
- mycloud = self._get_cloud('sles')
+ m_sysinfo.return_value = {
+ "dist": ("SLES", "15.0", "SUSE Linux Enterprise Server 15")
+ }
+ mycloud = self._get_cloud("sles")
expected_client = mycloud.distro.preferred_ntp_clients[0]
- self.assertEqual('chrony', expected_client)
+ self.assertEqual("chrony", expected_client)
# >= 15.0 and openSUSE and ver != 42 => chrony
- self.m_sysinfo.return_value = {'dist': ('openSUSE Tumbleweed',
- '20180326',
- 'timbleweed')}
- mycloud = self._get_cloud('opensuse')
+ m_sysinfo.return_value = {
+ "dist": ("openSUSE Tumbleweed", "20180326", "timbleweed")
+ }
+ mycloud = self._get_cloud("opensuse")
expected_client = mycloud.distro.preferred_ntp_clients[0]
- self.assertEqual('chrony', expected_client)
+ self.assertEqual("chrony", expected_client)
- def test_ubuntu_xenial_picks_ntp(self):
+ @mock.patch("cloudinit.util.system_info")
+ def test_ubuntu_xenial_picks_ntp(self, m_sysinfo):
"""Test Ubuntu picks ntp on xenial release"""
- self.m_sysinfo.return_value = {'dist': ('Ubuntu', '16.04', 'xenial')}
- mycloud = self._get_cloud('ubuntu')
+ m_sysinfo.return_value = {"dist": ("Ubuntu", "16.04", "xenial")}
+ mycloud = self._get_cloud("ubuntu")
expected_client = mycloud.distro.preferred_ntp_clients[0]
- self.assertEqual('ntp', expected_client)
+ self.assertEqual("ntp", expected_client)
- @mock.patch('cloudinit.config.cc_ntp.subp.which')
+ @mock.patch("cloudinit.config.cc_ntp.subp.which")
def test_snappy_system_picks_timesyncd(self, m_which):
"""Test snappy systems prefer installed clients"""
@@ -571,26 +621,27 @@ class TestNtp(FilesystemMockingTestCase):
self.m_snappy.return_value = True
# ubuntu core systems will have timesyncd installed
- m_which.side_effect = iter([None, '/lib/systemd/systemd-timesyncd',
- None, None, None])
- distro = 'ubuntu'
+ m_which.side_effect = iter(
+ [None, "/lib/systemd/systemd-timesyncd", None, None, None]
+ )
+ distro = "ubuntu"
mycloud = self._get_cloud(distro)
distro_configs = cc_ntp.distro_ntp_client_configs(distro)
- expected_client = 'systemd-timesyncd'
+ expected_client = "systemd-timesyncd"
expected_cfg = distro_configs[expected_client]
expected_calls = []
# we only get to timesyncd
for client in mycloud.distro.preferred_ntp_clients[0:2]:
cfg = distro_configs[client]
- expected_calls.append(mock.call(cfg['check_exe']))
+ expected_calls.append(mock.call(cfg["check_exe"]))
result = cc_ntp.select_ntp_client(None, mycloud.distro)
m_which.assert_has_calls(expected_calls)
self.assertEqual(sorted(expected_cfg), sorted(cfg))
self.assertEqual(sorted(expected_cfg), sorted(result))
- @mock.patch('cloudinit.config.cc_ntp.subp.which')
+ @mock.patch("cloudinit.config.cc_ntp.subp.which")
def test_ntp_distro_searches_all_preferred_clients(self, m_which):
- """Test select_ntp_client search all distro perferred clients """
+ """Test select_ntp_client search all distro perferred clients"""
# nothing is installed
m_which.return_value = None
for distro in cc_ntp.distros:
@@ -601,12 +652,12 @@ class TestNtp(FilesystemMockingTestCase):
expected_calls = []
for client in mycloud.distro.preferred_ntp_clients:
cfg = distro_configs[client]
- expected_calls.append(mock.call(cfg['check_exe']))
+ expected_calls.append(mock.call(cfg["check_exe"]))
cc_ntp.select_ntp_client({}, mycloud.distro)
m_which.assert_has_calls(expected_calls)
self.assertEqual(sorted(expected_cfg), sorted(cfg))
- @mock.patch('cloudinit.config.cc_ntp.subp.which')
+ @mock.patch("cloudinit.config.cc_ntp.subp.which")
def test_user_cfg_ntp_client_auto_uses_distro_clients(self, m_which):
"""Test user_cfg.ntp_client='auto' defaults to distro search"""
# nothing is installed
@@ -619,34 +670,36 @@ class TestNtp(FilesystemMockingTestCase):
expected_calls = []
for client in mycloud.distro.preferred_ntp_clients:
cfg = distro_configs[client]
- expected_calls.append(mock.call(cfg['check_exe']))
- cc_ntp.select_ntp_client('auto', mycloud.distro)
+ expected_calls.append(mock.call(cfg["check_exe"]))
+ cc_ntp.select_ntp_client("auto", mycloud.distro)
m_which.assert_has_calls(expected_calls)
self.assertEqual(sorted(expected_cfg), sorted(cfg))
- @mock.patch('cloudinit.config.cc_ntp.write_ntp_config_template')
- @mock.patch('cloudinit.cloud.Cloud.get_template_filename')
- @mock.patch('cloudinit.config.cc_ntp.subp.which')
- def test_ntp_custom_client_overrides_installed_clients(self, m_which,
- m_tmpfn, m_write):
- """Test user client is installed despite other clients present """
- client = 'ntpdate'
- cfg = {'ntp': {'ntp_client': client}}
+ @mock.patch("cloudinit.config.cc_ntp.write_ntp_config_template")
+ @mock.patch("cloudinit.cloud.Cloud.get_template_filename")
+ @mock.patch("cloudinit.config.cc_ntp.subp.which")
+ def test_ntp_custom_client_overrides_installed_clients(
+ self, m_which, m_tmpfn, m_write
+ ):
+ """Test user client is installed despite other clients present"""
+ client = "ntpdate"
+ cfg = {"ntp": {"ntp_client": client}}
for distro in cc_ntp.distros:
# client is not installed
m_which.side_effect = iter([None])
mycloud = self._get_cloud(distro)
- with mock.patch.object(mycloud.distro,
- 'install_packages') as m_install:
- cc_ntp.handle('notimportant', cfg, mycloud, None, None)
+ with mock.patch.object(
+ mycloud.distro, "install_packages"
+ ) as m_install:
+ cc_ntp.handle("notimportant", cfg, mycloud, None, None)
m_install.assert_called_with([client])
m_which.assert_called_with(client)
- @mock.patch('cloudinit.config.cc_ntp.subp.which')
+ @mock.patch("cloudinit.config.cc_ntp.subp.which")
def test_ntp_system_config_overrides_distro_builtin_clients(self, m_which):
"""Test distro system_config overrides builtin preferred ntp clients"""
- system_client = 'chrony'
- sys_cfg = {'ntp_client': system_client}
+ system_client = "chrony"
+ sys_cfg = {"ntp_client": system_client}
# no clients installed
m_which.return_value = None
for distro in cc_ntp.distros:
@@ -657,12 +710,12 @@ class TestNtp(FilesystemMockingTestCase):
self.assertEqual(sorted(expected_cfg), sorted(result))
m_which.assert_has_calls([])
- @mock.patch('cloudinit.config.cc_ntp.subp.which')
+ @mock.patch("cloudinit.config.cc_ntp.subp.which")
def test_ntp_user_config_overrides_system_cfg(self, m_which):
"""Test user-data overrides system_config ntp_client"""
- system_client = 'chrony'
- sys_cfg = {'ntp_client': system_client}
- user_client = 'systemd-timesyncd'
+ system_client = "chrony"
+ sys_cfg = {"ntp_client": system_client}
+ user_client = "systemd-timesyncd"
# no clients installed
m_which.return_value = None
for distro in cc_ntp.distros:
@@ -673,114 +726,145 @@ class TestNtp(FilesystemMockingTestCase):
self.assertEqual(sorted(expected_cfg), sorted(result))
m_which.assert_has_calls([])
- @mock.patch('cloudinit.config.cc_ntp.reload_ntp')
- @mock.patch('cloudinit.config.cc_ntp.install_ntp_client')
- def test_ntp_user_provided_config_with_template(self, m_install, m_reload):
- custom = r'\n#MyCustomTemplate'
+ @mock.patch("cloudinit.config.cc_ntp.install_ntp_client")
+ def test_ntp_user_provided_config_with_template(self, m_install):
+ custom = r"\n#MyCustomTemplate"
user_template = NTP_TEMPLATE + custom
- confpath = os.path.join(self.new_root, 'etc/myntp/myntp.conf')
+ confpath = os.path.join(self.new_root, "etc/myntp/myntp.conf")
cfg = {
- 'ntp': {
- 'pools': ['mypool.org'],
- 'ntp_client': 'myntpd',
- 'config': {
- 'check_exe': 'myntpd',
- 'confpath': confpath,
- 'packages': ['myntp'],
- 'service_name': 'myntp',
- 'template': user_template,
- }
+ "ntp": {
+ "pools": ["mypool.org"],
+ "ntp_client": "myntpd",
+ "config": {
+ "check_exe": "myntpd",
+ "confpath": confpath,
+ "packages": ["myntp"],
+ "service_name": "myntp",
+ "template": user_template,
+ },
}
}
for distro in cc_ntp.distros:
mycloud = self._get_cloud(distro)
- mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR'
+ mock_path = "cloudinit.config.cc_ntp.temp_utils._TMPDIR"
with mock.patch(mock_path, self.new_root):
- cc_ntp.handle('notimportant', cfg, mycloud, None, None)
+ cc_ntp.handle("notimportant", cfg, mycloud, None, None)
self.assertEqual(
"servers []\npools ['mypool.org']\n%s" % custom,
- util.load_file(confpath))
-
- @mock.patch('cloudinit.config.cc_ntp.supplemental_schema_validation')
- @mock.patch('cloudinit.config.cc_ntp.reload_ntp')
- @mock.patch('cloudinit.config.cc_ntp.install_ntp_client')
- @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
- def test_ntp_user_provided_config_template_only(self, m_select, m_install,
- m_reload, m_schema):
+ util.load_file(confpath),
+ )
+
+ @mock.patch("cloudinit.config.cc_ntp.supplemental_schema_validation")
+ @mock.patch("cloudinit.config.cc_ntp.install_ntp_client")
+ @mock.patch("cloudinit.config.cc_ntp.select_ntp_client")
+ def test_ntp_user_provided_config_template_only(
+ self, m_select, m_install, m_schema
+ ):
"""Test custom template for default client"""
- custom = r'\n#MyCustomTemplate'
+ custom = r"\n#MyCustomTemplate"
user_template = NTP_TEMPLATE + custom
- client = 'chrony'
+ client = "chrony"
cfg = {
- 'pools': ['mypool.org'],
- 'ntp_client': client,
- 'config': {
- 'template': user_template,
- }
+ "pools": ["mypool.org"],
+ "ntp_client": client,
+ "config": {
+ "template": user_template,
+ },
}
expected_merged_cfg = {
- 'check_exe': 'chronyd',
- 'confpath': '{tmpdir}/client.conf'.format(tmpdir=self.new_root),
- 'template_name': 'client.conf', 'template': user_template,
- 'service_name': 'chrony', 'packages': ['chrony']}
+ "check_exe": "chronyd",
+ "confpath": "{tmpdir}/client.conf".format(tmpdir=self.new_root),
+ "template_name": "client.conf",
+ "template": user_template,
+ "service_name": "chrony",
+ "packages": ["chrony"],
+ }
for distro in cc_ntp.distros:
mycloud = self._get_cloud(distro)
- ntpconfig = self._mock_ntp_client_config(client=client,
- distro=distro)
- confpath = ntpconfig['confpath']
+ ntpconfig = self._mock_ntp_client_config(
+ client=client, distro=distro
+ )
+ confpath = ntpconfig["confpath"]
m_select.return_value = ntpconfig
- mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR'
+ mock_path = "cloudinit.config.cc_ntp.temp_utils._TMPDIR"
with mock.patch(mock_path, self.new_root):
- cc_ntp.handle('notimportant',
- {'ntp': cfg}, mycloud, None, None)
+ cc_ntp.handle(
+ "notimportant", {"ntp": cfg}, mycloud, None, None
+ )
self.assertEqual(
"servers []\npools ['mypool.org']\n%s" % custom,
- util.load_file(confpath))
+ util.load_file(confpath),
+ )
m_schema.assert_called_with(expected_merged_cfg)
class TestSupplementalSchemaValidation(CiTestCase):
-
def test_error_on_missing_keys(self):
"""ValueError raised reporting any missing required ntp:config keys"""
cfg = {}
- match = (r'Invalid ntp configuration:\\nMissing required ntp:config'
- ' keys: check_exe, confpath, packages, service_name')
+ match = (
+ r"Invalid ntp configuration:\\nMissing required ntp:config"
+ " keys: check_exe, confpath, packages, service_name"
+ )
with self.assertRaisesRegex(ValueError, match):
cc_ntp.supplemental_schema_validation(cfg)
def test_error_requiring_either_template_or_template_name(self):
"""ValueError raised if both template not template_name are None."""
- cfg = {'confpath': 'someconf', 'check_exe': '', 'service_name': '',
- 'template': None, 'template_name': None, 'packages': []}
- match = (r'Invalid ntp configuration:\\nEither ntp:config:template'
- ' or ntp:config:template_name values are required')
+ cfg = {
+ "confpath": "someconf",
+ "check_exe": "",
+ "service_name": "",
+ "template": None,
+ "template_name": None,
+ "packages": [],
+ }
+ match = (
+ r"Invalid ntp configuration:\\nEither ntp:config:template"
+ " or ntp:config:template_name values are required"
+ )
with self.assertRaisesRegex(ValueError, match):
cc_ntp.supplemental_schema_validation(cfg)
def test_error_on_non_list_values(self):
"""ValueError raised when packages is not of type list."""
- cfg = {'confpath': 'someconf', 'check_exe': '', 'service_name': '',
- 'template': 'asdf', 'template_name': None, 'packages': 'NOPE'}
- match = (r'Invalid ntp configuration:\\nExpected a list of required'
- ' package names for ntp:config:packages. Found \\(NOPE\\)')
+ cfg = {
+ "confpath": "someconf",
+ "check_exe": "",
+ "service_name": "",
+ "template": "asdf",
+ "template_name": None,
+ "packages": "NOPE",
+ }
+ match = (
+ r"Invalid ntp configuration:\\nExpected a list of required"
+ " package names for ntp:config:packages. Found \\(NOPE\\)"
+ )
with self.assertRaisesRegex(ValueError, match):
cc_ntp.supplemental_schema_validation(cfg)
def test_error_on_non_string_values(self):
"""ValueError raised for any values expected as string type."""
- cfg = {'confpath': 1, 'check_exe': 2, 'service_name': 3,
- 'template': 4, 'template_name': 5, 'packages': []}
+ cfg = {
+ "confpath": 1,
+ "check_exe": 2,
+ "service_name": 3,
+ "template": 4,
+ "template_name": 5,
+ "packages": [],
+ }
errors = [
- 'Expected a config file path ntp:config:confpath. Found (1)',
- 'Expected a string type for ntp:config:check_exe. Found (2)',
- 'Expected a string type for ntp:config:service_name. Found (3)',
- 'Expected a string type for ntp:config:template. Found (4)',
- 'Expected a string type for ntp:config:template_name. Found (5)']
+ "Expected a config file path ntp:config:confpath. Found (1)",
+ "Expected a string type for ntp:config:check_exe. Found (2)",
+ "Expected a string type for ntp:config:service_name. Found (3)",
+ "Expected a string type for ntp:config:template. Found (4)",
+ "Expected a string type for ntp:config:template_name. Found (5)",
+ ]
with self.assertRaises(ValueError) as context_mgr:
cc_ntp.supplemental_schema_validation(cfg)
error_msg = str(context_mgr.exception)
for error in errors:
self.assertIn(error, error_msg)
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_power_state.py b/tests/unittests/config/test_cc_power_state_change.py
index 4ac49424..47eb0d58 100644
--- a/tests/unittests/test_handler/test_handler_power_state.py
+++ b/tests/unittests/config/test_cc_power_state_change.py
@@ -2,21 +2,18 @@
import sys
+from cloudinit import distros, helpers
from cloudinit.config import cc_power_state_change as psc
-
-from cloudinit import distros
-from cloudinit import helpers
-
-from cloudinit.tests import helpers as t_help
-from cloudinit.tests.helpers import mock
+from tests.unittests import helpers as t_help
+from tests.unittests.helpers import mock
class TestLoadPowerState(t_help.TestCase):
def setUp(self):
super(TestLoadPowerState, self).setUp()
- cls = distros.fetch('ubuntu')
+ cls = distros.fetch("ubuntu")
paths = helpers.Paths({})
- self.dist = cls('ubuntu', {}, paths)
+ self.dist = cls("ubuntu", {}, paths)
def test_no_config(self):
# completely empty config should mean do nothing
@@ -25,85 +22,86 @@ class TestLoadPowerState(t_help.TestCase):
def test_irrelevant_config(self):
# no power_state field in config should return None for cmd
- (cmd, _timeout, _condition) = psc.load_power_state({'foo': 'bar'},
- self.dist)
+ (cmd, _timeout, _condition) = psc.load_power_state(
+ {"foo": "bar"}, self.dist
+ )
self.assertIsNone(cmd)
def test_invalid_mode(self):
- cfg = {'power_state': {'mode': 'gibberish'}}
+ cfg = {"power_state": {"mode": "gibberish"}}
self.assertRaises(TypeError, psc.load_power_state, cfg, self.dist)
- cfg = {'power_state': {'mode': ''}}
+ cfg = {"power_state": {"mode": ""}}
self.assertRaises(TypeError, psc.load_power_state, cfg, self.dist)
def test_empty_mode(self):
- cfg = {'power_state': {'message': 'goodbye'}}
+ cfg = {"power_state": {"message": "goodbye"}}
self.assertRaises(TypeError, psc.load_power_state, cfg, self.dist)
def test_valid_modes(self):
- cfg = {'power_state': {}}
- for mode in ('halt', 'poweroff', 'reboot'):
- cfg['power_state']['mode'] = mode
+ cfg = {"power_state": {}}
+ for mode in ("halt", "poweroff", "reboot"):
+ cfg["power_state"]["mode"] = mode
check_lps_ret(psc.load_power_state(cfg, self.dist), mode=mode)
def test_invalid_delay(self):
- cfg = {'power_state': {'mode': 'poweroff', 'delay': 'goodbye'}}
+ cfg = {"power_state": {"mode": "poweroff", "delay": "goodbye"}}
self.assertRaises(TypeError, psc.load_power_state, cfg, self.dist)
def test_valid_delay(self):
- cfg = {'power_state': {'mode': 'poweroff', 'delay': ''}}
+ cfg = {"power_state": {"mode": "poweroff", "delay": ""}}
for delay in ("now", "+1", "+30"):
- cfg['power_state']['delay'] = delay
+ cfg["power_state"]["delay"] = delay
check_lps_ret(psc.load_power_state(cfg, self.dist))
def test_message_present(self):
- cfg = {'power_state': {'mode': 'poweroff', 'message': 'GOODBYE'}}
+ cfg = {"power_state": {"mode": "poweroff", "message": "GOODBYE"}}
ret = psc.load_power_state(cfg, self.dist)
check_lps_ret(psc.load_power_state(cfg, self.dist))
- self.assertIn(cfg['power_state']['message'], ret[0])
+ self.assertIn(cfg["power_state"]["message"], ret[0])
def test_no_message(self):
# if message is not present, then no argument should be passed for it
- cfg = {'power_state': {'mode': 'poweroff'}}
+ cfg = {"power_state": {"mode": "poweroff"}}
(cmd, _timeout, _condition) = psc.load_power_state(cfg, self.dist)
self.assertNotIn("", cmd)
check_lps_ret(psc.load_power_state(cfg, self.dist))
self.assertTrue(len(cmd) == 3)
def test_condition_null_raises(self):
- cfg = {'power_state': {'mode': 'poweroff', 'condition': None}}
+ cfg = {"power_state": {"mode": "poweroff", "condition": None}}
self.assertRaises(TypeError, psc.load_power_state, cfg, self.dist)
def test_condition_default_is_true(self):
- cfg = {'power_state': {'mode': 'poweroff'}}
+ cfg = {"power_state": {"mode": "poweroff"}}
_cmd, _timeout, cond = psc.load_power_state(cfg, self.dist)
self.assertEqual(cond, True)
def test_freebsd_poweroff_uses_lowercase_p(self):
- cls = distros.fetch('freebsd')
+ cls = distros.fetch("freebsd")
paths = helpers.Paths({})
- freebsd = cls('freebsd', {}, paths)
- cfg = {'power_state': {'mode': 'poweroff'}}
+ freebsd = cls("freebsd", {}, paths)
+ cfg = {"power_state": {"mode": "poweroff"}}
ret = psc.load_power_state(cfg, freebsd)
- self.assertIn('-p', ret[0])
+ self.assertIn("-p", ret[0])
def test_alpine_delay(self):
# alpine takes delay in seconds.
- cls = distros.fetch('alpine')
+ cls = distros.fetch("alpine")
paths = helpers.Paths({})
- alpine = cls('alpine', {}, paths)
- cfg = {'power_state': {'mode': 'poweroff', 'delay': ''}}
- for delay, value in (('now', 0), ("+1", 60), ("+30", 1800)):
- cfg['power_state']['delay'] = delay
+ alpine = cls("alpine", {}, paths)
+ cfg = {"power_state": {"mode": "poweroff", "delay": ""}}
+ for delay, value in (("now", 0), ("+1", 60), ("+30", 1800)):
+ cfg["power_state"]["delay"] = delay
ret = psc.load_power_state(cfg, alpine)
- self.assertEqual('-d', ret[0][1])
+ self.assertEqual("-d", ret[0][1])
self.assertEqual(str(value), ret[0][2])
class TestCheckCondition(t_help.TestCase):
def cmd_with_exit(self, rc):
- return([sys.executable, '-c', 'import sys; sys.exit(%s)' % rc])
+ return [sys.executable, "-c", "import sys; sys.exit(%s)" % rc]
def test_true_is_true(self):
self.assertEqual(psc.check_condition(True), True)
@@ -120,7 +118,8 @@ class TestCheckCondition(t_help.TestCase):
def test_cmd_exit_nonzero_warns(self):
mocklog = mock.Mock()
self.assertEqual(
- psc.check_condition(self.cmd_with_exit(2), mocklog), False)
+ psc.check_condition(self.cmd_with_exit(2), mocklog), False
+ )
self.assertEqual(mocklog.warning.call_count, 1)
@@ -133,14 +132,14 @@ def check_lps_ret(psc_return, mode=None):
timeout = psc_return[1]
condition = psc_return[2]
- if 'shutdown' not in psc_return[0][0]:
+ if "shutdown" not in psc_return[0][0]:
errs.append("string 'shutdown' not in cmd")
if condition is None:
errs.append("condition was not returned")
if mode is not None:
- opt = {'halt': '-H', 'poweroff': '-P', 'reboot': '-r'}[mode]
+ opt = {"halt": "-H", "poweroff": "-P", "reboot": "-r"}[mode]
if opt not in psc_return[0]:
errs.append("opt '%s' not in cmd: %s" % (opt, cmd))
@@ -154,6 +153,7 @@ def check_lps_ret(psc_return, mode=None):
if len(errs):
lines = ["Errors in result: %s" % str(psc_return)] + errs
- raise Exception('\n'.join(lines))
+ raise Exception("\n".join(lines))
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_puppet.py b/tests/unittests/config/test_cc_puppet.py
new file mode 100644
index 00000000..2c4481da
--- /dev/null
+++ b/tests/unittests/config/test_cc_puppet.py
@@ -0,0 +1,450 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+import logging
+import textwrap
+
+from cloudinit import util
+from cloudinit.config import cc_puppet
+from tests.unittests.helpers import CiTestCase, HttprettyTestCase, mock
+from tests.unittests.util import get_cloud
+
+LOG = logging.getLogger(__name__)
+
+
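+# Note: the two patches below are applied at class level, so every test in
+# TestAutostartPuppet receives mocked 'os' and 'subp.subp' as (m_os, m_subp)
+# and can steer os.path.exists via a side_effect without touching the host.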
+@mock.patch("cloudinit.config.cc_puppet.subp.subp")
+@mock.patch("cloudinit.config.cc_puppet.os")
+class TestAutostartPuppet(CiTestCase):
+ def test_wb_autostart_puppet_updates_puppet_default(self, m_os, m_subp):
+ """Update /etc/default/puppet to autostart if it exists."""
+
+ def _fake_exists(path):
+ return path == "/etc/default/puppet"
+
+ m_os.path.exists.side_effect = _fake_exists
+ cc_puppet._autostart_puppet(LOG)
+ self.assertEqual(
+ [
+ mock.call(
+ [
+ "sed",
+ "-i",
+ "-e",
+ "s/^START=.*/START=yes/",
+ "/etc/default/puppet",
+ ],
+ capture=False,
+ )
+ ],
+ m_subp.call_args_list,
+ )
+
+ def test_wb_autostart_puppet_enables_puppet_systemctl(self, m_os, m_subp):
+ """If systemctl is present, enable puppet via systemctl."""
+
+ def _fake_exists(path):
+ return path == "/bin/systemctl"
+
+ m_os.path.exists.side_effect = _fake_exists
+ cc_puppet._autostart_puppet(LOG)
+ expected_calls = [
+ mock.call(
+ ["/bin/systemctl", "enable", "puppet.service"], capture=False
+ )
+ ]
+ self.assertEqual(expected_calls, m_subp.call_args_list)
+
+ def test_wb_autostart_puppet_enables_puppet_chkconfig(self, m_os, m_subp):
+ """If chkconfig is present, enable puppet via checkcfg."""
+
+ def _fake_exists(path):
+ return path == "/sbin/chkconfig"
+
+ m_os.path.exists.side_effect = _fake_exists
+ cc_puppet._autostart_puppet(LOG)
+ expected_calls = [
+ mock.call(["/sbin/chkconfig", "puppet", "on"], capture=False)
+ ]
+ self.assertEqual(expected_calls, m_subp.call_args_list)
+
+
+@mock.patch("cloudinit.config.cc_puppet._autostart_puppet")
+class TestPuppetHandle(CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestPuppetHandle, self).setUp()
+ self.new_root = self.tmp_dir()
+ self.conf = self.tmp_path("puppet.conf")
+ self.csr_attributes_path = self.tmp_path("csr_attributes.yaml")
+ self.cloud = get_cloud()
+
+ def test_skips_missing_puppet_key_in_cloudconfig(self, m_auto):
+ """Cloud-config containing no 'puppet' key is skipped."""
+
+ cfg = {}
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ self.assertIn("no 'puppet' configuration found", self.logs.getvalue())
+ self.assertEqual(0, m_auto.call_count)
+
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
+ def test_puppet_config_starts_puppet_service(self, m_subp, m_auto):
+ """Cloud-config 'puppet' configuration starts puppet."""
+
+ cfg = {"puppet": {"install": False}}
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ self.assertEqual(1, m_auto.call_count)
+ self.assertIn(
+ [mock.call(["service", "puppet", "start"], capture=False)],
+ m_subp.call_args_list,
+ )
+
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
+ def test_empty_puppet_config_installs_puppet(self, m_subp, m_auto):
+ """Cloud-config empty 'puppet' configuration installs latest puppet."""
+
+ self.cloud.distro = mock.MagicMock()
+ cfg = {"puppet": {}}
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ self.assertEqual(
+ [mock.call(("puppet", None))],
+ self.cloud.distro.install_packages.call_args_list,
+ )
+
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
+ def test_puppet_config_installs_puppet_on_true(self, m_subp, _):
+ """Cloud-config with 'puppet' key installs when 'install' is True."""
+
+ self.cloud.distro = mock.MagicMock()
+ cfg = {"puppet": {"install": True}}
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ self.assertEqual(
+ [mock.call(("puppet", None))],
+ self.cloud.distro.install_packages.call_args_list,
+ )
+
+ @mock.patch("cloudinit.config.cc_puppet.install_puppet_aio", autospec=True)
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
+ def test_puppet_config_installs_puppet_aio(self, m_subp, m_aio, _):
+ """Cloud-config with 'puppet' key installs
+ when 'install_type' is 'aio'."""
+
+ self.cloud.distro = mock.MagicMock()
+ cfg = {"puppet": {"install": True, "install_type": "aio"}}
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ m_aio.assert_called_with(cc_puppet.AIO_INSTALL_URL, None, None, True)
+
+ @mock.patch("cloudinit.config.cc_puppet.install_puppet_aio", autospec=True)
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
+ def test_puppet_config_installs_puppet_aio_with_version(
+ self, m_subp, m_aio, _
+ ):
+ """Cloud-config with 'puppet' key installs
+ when 'install_type' is 'aio' and 'version' is specified."""
+
+ self.cloud.distro = mock.MagicMock()
+ cfg = {
+ "puppet": {
+ "install": True,
+ "version": "6.24.0",
+ "install_type": "aio",
+ }
+ }
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ m_aio.assert_called_with(
+ cc_puppet.AIO_INSTALL_URL, "6.24.0", None, True
+ )
+
+ @mock.patch("cloudinit.config.cc_puppet.install_puppet_aio", autospec=True)
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
+ def test_puppet_config_installs_puppet_aio_with_collection(
+ self, m_subp, m_aio, _
+ ):
+ """Cloud-config with 'puppet' key installs
+ when 'install_type' is 'aio' and 'collection' is specified."""
+
+ self.cloud.distro = mock.MagicMock()
+ cfg = {
+ "puppet": {
+ "install": True,
+ "collection": "puppet6",
+ "install_type": "aio",
+ }
+ }
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ m_aio.assert_called_with(
+ cc_puppet.AIO_INSTALL_URL, None, "puppet6", True
+ )
+
+ @mock.patch("cloudinit.config.cc_puppet.install_puppet_aio", autospec=True)
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
+ def test_puppet_config_installs_puppet_aio_with_custom_url(
+ self, m_subp, m_aio, _
+ ):
+ """Cloud-config with 'puppet' key installs
+ when 'install_type' is 'aio' and 'aio_install_url' is specified."""
+
+ self.cloud.distro = mock.MagicMock()
+ cfg = {
+ "puppet": {
+ "install": True,
+ "aio_install_url": "http://test.url/path/to/script.sh",
+ "install_type": "aio",
+ }
+ }
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ m_aio.assert_called_with(
+ "http://test.url/path/to/script.sh", None, None, True
+ )
+
+ @mock.patch("cloudinit.config.cc_puppet.install_puppet_aio", autospec=True)
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
+ def test_puppet_config_installs_puppet_aio_without_cleanup(
+ self, m_subp, m_aio, _
+ ):
+ """Cloud-config with 'puppet' key installs
+ when 'install_type' is 'aio' and no cleanup."""
+
+ self.cloud.distro = mock.MagicMock()
+ cfg = {
+ "puppet": {
+ "install": True,
+ "cleanup": False,
+ "install_type": "aio",
+ }
+ }
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ m_aio.assert_called_with(cc_puppet.AIO_INSTALL_URL, None, None, False)
+
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
+ def test_puppet_config_installs_puppet_version(self, m_subp, _):
+ """Cloud-config 'puppet' configuration can specify a version."""
+
+ self.cloud.distro = mock.MagicMock()
+ cfg = {"puppet": {"version": "3.8"}}
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ self.assertEqual(
+ [mock.call(("puppet", "3.8"))],
+ self.cloud.distro.install_packages.call_args_list,
+ )
+
+ @mock.patch("cloudinit.config.cc_puppet.get_config_value")
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
+ def test_puppet_config_updates_puppet_conf(
+ self, m_subp, m_default, m_auto
+ ):
+ """When 'conf' is provided update values in PUPPET_CONF_PATH."""
+
+ def _fake_get_config_value(puppet_bin, setting):
+ return self.conf
+
+ m_default.side_effect = _fake_get_config_value
+
+ cfg = {
+ "puppet": {
+ "conf": {"agent": {"server": "puppetserver.example.org"}}
+ }
+ }
+ util.write_file(self.conf, "[agent]\nserver = origpuppet\nother = 3")
+ self.cloud.distro = mock.MagicMock()
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ content = util.load_file(self.conf)
+ expected = "[agent]\nserver = puppetserver.example.org\nother = 3\n\n"
+ self.assertEqual(expected, content)
+
+ @mock.patch("cloudinit.config.cc_puppet.get_config_value")
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp")
+ def test_puppet_writes_csr_attributes_file(
+ self, m_subp, m_default, m_auto
+ ):
+ """When csr_attributes is provided
+ creates file in PUPPET_CSR_ATTRIBUTES_PATH."""
+
+ def _fake_get_config_value(puppet_bin, setting):
+ return self.csr_attributes_path
+
+ m_default.side_effect = _fake_get_config_value
+
+ self.cloud.distro = mock.MagicMock()
+ cfg = {
+ "puppet": {
+ "csr_attributes": {
+ "custom_attributes": {
+ "1.2.840.113549.1.9.7": (
+ "342thbjkt82094y0uthhor289jnqthpc2290"
+ )
+ },
+ "extension_requests": {
+ "pp_uuid": "ED803750-E3C7-44F5-BB08-41A04433FE2E",
+ "pp_image_name": "my_ami_image",
+ "pp_preshared_key": (
+ "342thbjkt82094y0uthhor289jnqthpc2290"
+ ),
+ },
+ }
+ }
+ }
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ content = util.load_file(self.csr_attributes_path)
+ expected = textwrap.dedent(
+ """\
+ custom_attributes:
+ 1.2.840.113549.1.9.7: 342thbjkt82094y0uthhor289jnqthpc2290
+ extension_requests:
+ pp_image_name: my_ami_image
+ pp_preshared_key: 342thbjkt82094y0uthhor289jnqthpc2290
+ pp_uuid: ED803750-E3C7-44F5-BB08-41A04433FE2E
+ """
+ )
+ self.assertEqual(expected, content)
+
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
+ def test_puppet_runs_puppet_if_requested(self, m_subp, m_auto):
+ """Run puppet with default args if 'exec' is set to True."""
+
+ cfg = {"puppet": {"exec": True}}
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ self.assertEqual(1, m_auto.call_count)
+ self.assertIn(
+ [mock.call(["puppet", "agent", "--test"], capture=False)],
+ m_subp.call_args_list,
+ )
+
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
+ def test_puppet_starts_puppetd(self, m_subp, m_auto):
+ """Run puppet with default args if 'exec' is set to True."""
+
+ cfg = {"puppet": {}}
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ self.assertEqual(1, m_auto.call_count)
+ self.assertIn(
+ [mock.call(["service", "puppet", "start"], capture=False)],
+ m_subp.call_args_list,
+ )
+
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
+ def test_puppet_skips_puppetd(self, m_subp, m_auto):
+ """Run puppet with default args if 'exec' is set to True."""
+
+ cfg = {"puppet": {"start_service": False}}
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ self.assertEqual(0, m_auto.call_count)
+ self.assertNotIn(
+ [mock.call(["service", "puppet", "start"], capture=False)],
+ m_subp.call_args_list,
+ )
+
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
+ def test_puppet_runs_puppet_with_args_list_if_requested(
+ self, m_subp, m_auto
+ ):
+ """Run puppet with 'exec_args' list if 'exec' is set to True."""
+
+ cfg = {
+ "puppet": {
+ "exec": True,
+ "exec_args": ["--onetime", "--detailed-exitcodes"],
+ }
+ }
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ self.assertEqual(1, m_auto.call_count)
+ self.assertIn(
+ [
+ mock.call(
+ ["puppet", "agent", "--onetime", "--detailed-exitcodes"],
+ capture=False,
+ )
+ ],
+ m_subp.call_args_list,
+ )
+
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
+ def test_puppet_runs_puppet_with_args_string_if_requested(
+ self, m_subp, m_auto
+ ):
+ """Run puppet with 'exec_args' string if 'exec' is set to True."""
+
+ cfg = {
+ "puppet": {
+ "exec": True,
+ "exec_args": "--onetime --detailed-exitcodes",
+ }
+ }
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ self.assertEqual(1, m_auto.call_count)
+ self.assertIn(
+ [
+ mock.call(
+ ["puppet", "agent", "--onetime", "--detailed-exitcodes"],
+ capture=False,
+ )
+ ],
+ m_subp.call_args_list,
+ )
+
+
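+# URL_MOCK stands in for the object returned by url_helper.readurl(), so
+# install_puppet_aio sees a small fake installer script as the download.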
+URL_MOCK = mock.Mock()
+URL_MOCK.contents = b'#!/bin/bash\necho "Hi Mom"'
+
+
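+# Class-level patches: every test below gets a mocked url_helper.readurl
+# (m_readurl) returning URL_MOCK above, and a mocked subp.subp (m_subp).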
+@mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=(None, None))
+@mock.patch(
+ "cloudinit.config.cc_puppet.url_helper.readurl",
+ return_value=URL_MOCK,
+ autospec=True,
+)
+class TestInstallPuppetAio(HttprettyTestCase):
+ def test_install_with_default_arguments(self, m_readurl, m_subp):
+ """Install AIO with no arguments"""
+ cc_puppet.install_puppet_aio()
+
+ self.assertEqual(
+ [mock.call([mock.ANY, "--cleanup"], capture=False)],
+ m_subp.call_args_list,
+ )
+
+ def test_install_with_custom_url(self, m_readurl, m_subp):
+ """Install AIO from custom URL"""
+ cc_puppet.install_puppet_aio("http://custom.url/path/to/script.sh")
+ m_readurl.assert_called_with(
+ url="http://custom.url/path/to/script.sh", retries=5
+ )
+
+ self.assertEqual(
+ [mock.call([mock.ANY, "--cleanup"], capture=False)],
+ m_subp.call_args_list,
+ )
+
+ def test_install_with_version(self, m_readurl, m_subp):
+ """Install AIO with specific version"""
+ cc_puppet.install_puppet_aio(cc_puppet.AIO_INSTALL_URL, "7.6.0")
+
+ self.assertEqual(
+ [mock.call([mock.ANY, "-v", "7.6.0", "--cleanup"], capture=False)],
+ m_subp.call_args_list,
+ )
+
+ def test_install_with_collection(self, m_readurl, m_subp):
+ """Install AIO with specific collection"""
+ cc_puppet.install_puppet_aio(
+ cc_puppet.AIO_INSTALL_URL, None, "puppet6-nightly"
+ )
+
+ self.assertEqual(
+ [
+ mock.call(
+ [mock.ANY, "-c", "puppet6-nightly", "--cleanup"],
+ capture=False,
+ )
+ ],
+ m_subp.call_args_list,
+ )
+
+ def test_install_with_no_cleanup(self, m_readurl, m_subp):
+ """Install AIO with no cleanup"""
+ cc_puppet.install_puppet_aio(
+ cc_puppet.AIO_INSTALL_URL, None, None, False
+ )
+
+ self.assertEqual(
+ [mock.call([mock.ANY], capture=False)], m_subp.call_args_list
+ )
diff --git a/tests/unittests/config/test_cc_refresh_rmc_and_interface.py b/tests/unittests/config/test_cc_refresh_rmc_and_interface.py
new file mode 100644
index 00000000..e038f814
--- /dev/null
+++ b/tests/unittests/config/test_cc_refresh_rmc_and_interface.py
@@ -0,0 +1,157 @@
+import logging
+from textwrap import dedent
+
+from cloudinit import util
+from cloudinit.config import cc_refresh_rmc_and_interface as ccrmci
+from tests.unittests import helpers as t_help
+from tests.unittests.helpers import mock
+
+LOG = logging.getLogger(__name__)
+MPATH = "cloudinit.config.cc_refresh_rmc_and_interface"
+NET_INFO = {
+ "lo": {
+ "ipv4": [
+ {
+ "ip": "127.0.0.1",
+ "bcast": "",
+ "mask": "255.0.0.0",
+ "scope": "host",
+ }
+ ],
+ "ipv6": [{"ip": "::1/128", "scope6": "host"}],
+ "hwaddr": "",
+ "up": "True",
+ },
+ "env2": {
+ "ipv4": [
+ {
+ "ip": "8.0.0.19",
+ "bcast": "8.0.0.255",
+ "mask": "255.255.255.0",
+ "scope": "global",
+ }
+ ],
+ "ipv6": [{"ip": "fe80::f896:c2ff:fe81:8220/64", "scope6": "link"}],
+ "hwaddr": "fa:96:c2:81:82:20",
+ "up": "True",
+ },
+ "env3": {
+ "ipv4": [
+ {
+ "ip": "90.0.0.14",
+ "bcast": "90.0.0.255",
+ "mask": "255.255.255.0",
+ "scope": "global",
+ }
+ ],
+ "ipv6": [{"ip": "fe80::f896:c2ff:fe81:8221/64", "scope6": "link"}],
+ "hwaddr": "fa:96:c2:81:82:21",
+ "up": "True",
+ },
+ "env4": {
+ "ipv4": [
+ {
+ "ip": "9.114.23.7",
+ "bcast": "9.114.23.255",
+ "mask": "255.255.255.0",
+ "scope": "global",
+ }
+ ],
+ "ipv6": [{"ip": "fe80::f896:c2ff:fe81:8222/64", "scope6": "link"}],
+ "hwaddr": "fa:96:c2:81:82:22",
+ "up": "True",
+ },
+ "env5": {
+ "ipv4": [],
+ "ipv6": [{"ip": "fe80::9c26:c3ff:fea4:62c8/64", "scope6": "link"}],
+ "hwaddr": "42:20:86:df:fa:4c",
+ "up": "True",
+ },
+}
+
+
+class TestRsctNodeFile(t_help.CiTestCase):
+ def test_disable_ipv6_interface(self):
+ """test parsing of iface files."""
+ fname = self.tmp_path("iface-eth5")
+ util.write_file(
+ fname,
+ dedent(
+ """\
+ BOOTPROTO=static
+ DEVICE=eth5
+ HWADDR=42:20:86:df:fa:4c
+ IPV6INIT=yes
+ IPADDR6=fe80::9c26:c3ff:fea4:62c8/64
+ IPV6ADDR=fe80::9c26:c3ff:fea4:62c8/64
+ NM_CONTROLLED=yes
+ ONBOOT=yes
+ STARTMODE=auto
+ TYPE=Ethernet
+ USERCTL=no
+ """
+ ),
+ )
+
+ ccrmci.disable_ipv6(fname)
+ self.assertEqual(
+ dedent(
+ """\
+ BOOTPROTO=static
+ DEVICE=eth5
+ HWADDR=42:20:86:df:fa:4c
+ ONBOOT=yes
+ STARTMODE=auto
+ TYPE=Ethernet
+ USERCTL=no
+ NM_CONTROLLED=no
+ """
+ ),
+ util.load_file(fname),
+ )
+
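+ # mock.patch decorators apply bottom-up: the first mock argument after
+ # 'self' corresponds to the innermost decorator (subp.which).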
+ @mock.patch(MPATH + ".refresh_rmc")
+ @mock.patch(MPATH + ".restart_network_manager")
+ @mock.patch(MPATH + ".disable_ipv6")
+ @mock.patch(MPATH + ".refresh_ipv6")
+ @mock.patch(MPATH + ".netinfo.netdev_info")
+ @mock.patch(MPATH + ".subp.which")
+ def test_handle(
+ self,
+ m_which,
+ m_netdev_info,
+ m_refresh_ipv6,
+ m_disable_ipv6,
+ m_restart_nm,
+ m_refresh_rmc,
+ ):
+ """Basic test of handle."""
+ m_netdev_info.return_value = NET_INFO
+ m_which.return_value = "/opt/rsct/bin/rmcctrl"
+ ccrmci.handle("refresh_rmc_and_interface", None, None, None, None)
+ self.assertEqual(1, m_netdev_info.call_count)
+ m_refresh_ipv6.assert_called_with("env5")
+ m_disable_ipv6.assert_called_with(
+ "/etc/sysconfig/network-scripts/ifcfg-env5"
+ )
+ self.assertEqual(1, m_restart_nm.call_count)
+ self.assertEqual(1, m_refresh_rmc.call_count)
+
+ @mock.patch(MPATH + ".netinfo.netdev_info")
+ def test_find_ipv6(self, m_netdev_info):
+ """find_ipv6_ifaces parses netdev_info returning those with ipv6"""
+ m_netdev_info.return_value = NET_INFO
+ found = ccrmci.find_ipv6_ifaces()
+ self.assertEqual(["env5"], found)
+
+ @mock.patch(MPATH + ".subp.subp")
+ def test_refresh_ipv6(self, m_subp):
+ """refresh_ipv6 should ip down and up the interface."""
+ iface = "myeth0"
+ ccrmci.refresh_ipv6(iface)
+ m_subp.assert_has_calls(
+ [
+ mock.call(["ip", "link", "set", iface, "down"]),
+ mock.call(["ip", "link", "set", iface, "up"]),
+ ]
+ )
diff --git a/tests/unittests/config/test_cc_resizefs.py b/tests/unittests/config/test_cc_resizefs.py
new file mode 100644
index 00000000..9981dcea
--- /dev/null
+++ b/tests/unittests/config/test_cc_resizefs.py
@@ -0,0 +1,490 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import logging
+from collections import namedtuple
+
+from cloudinit.config.cc_resizefs import (
+ _resize_btrfs,
+ _resize_ext,
+ _resize_ufs,
+ _resize_xfs,
+ _resize_zfs,
+ can_skip_resize,
+ handle,
+ maybe_get_writable_device_path,
+)
+from cloudinit.subp import ProcessExecutionError
+from tests.unittests.helpers import (
+ CiTestCase,
+ mock,
+ skipUnlessJsonSchema,
+ util,
+ wrap_and_call,
+)
+
+LOG = logging.getLogger(__name__)
+
+
+class TestResizefs(CiTestCase):
+ with_logs = True
+
+ def setUp(self):
+ super(TestResizefs, self).setUp()
+ self.name = "resizefs"
+
+ @mock.patch("cloudinit.subp.subp")
+ def test_skip_ufs_resize(self, m_subp):
+ fs_type = "ufs"
+ resize_what = "/"
+ devpth = "/dev/da0p2"
+ err = (
+ "growfs: requested size 2.0GB is not larger than the "
+ "current filesystem size 2.0GB\n"
+ )
+ exception = ProcessExecutionError(stderr=err, exit_code=1)
+ m_subp.side_effect = exception
+ res = can_skip_resize(fs_type, resize_what, devpth)
+ self.assertTrue(res)
+
+ @mock.patch("cloudinit.subp.subp")
+ def test_cannot_skip_ufs_resize(self, m_subp):
+ fs_type = "ufs"
+ resize_what = "/"
+ devpth = "/dev/da0p2"
+ m_subp.return_value = (
+ "stdout: super-block backups (for fsck_ffs -b #) at:\n\n",
+ "growfs: no room to allocate last cylinder group; "
+ "leaving 364KB unused\n",
+ )
+ res = can_skip_resize(fs_type, resize_what, devpth)
+ self.assertFalse(res)
+
+ @mock.patch("cloudinit.subp.subp")
+ def test_cannot_skip_ufs_growfs_exception(self, m_subp):
+ fs_type = "ufs"
+ resize_what = "/"
+ devpth = "/dev/da0p2"
+ err = "growfs: /dev/da0p2 is not clean - run fsck.\n"
+ exception = ProcessExecutionError(stderr=err, exit_code=1)
+ m_subp.side_effect = exception
+ with self.assertRaises(ProcessExecutionError):
+ can_skip_resize(fs_type, resize_what, devpth)
+
+ def test_can_skip_resize_ext(self):
+ self.assertFalse(can_skip_resize("ext", "/", "/dev/sda1"))
+
+ def test_handle_noops_on_disabled(self):
+ """The handle function logs when the configuration disables resize."""
+ cfg = {"resize_rootfs": False}
+ handle("cc_resizefs", cfg, _cloud=None, log=LOG, args=[])
+ self.assertIn(
+ "DEBUG: Skipping module named cc_resizefs, resizing disabled\n",
+ self.logs.getvalue(),
+ )
+
+ @skipUnlessJsonSchema()
+ def test_handle_schema_validation_logs_invalid_resize_rootfs_value(self):
+ """The handle reports json schema violations as a warning.
+
+ Invalid values for resize_rootfs result in disabling the module.
+ """
+ cfg = {"resize_rootfs": "junk"}
+ handle("cc_resizefs", cfg, _cloud=None, log=LOG, args=[])
+ logs = self.logs.getvalue()
+ self.assertIn(
+ "WARNING: Invalid cloud-config provided:\nresize_rootfs: 'junk' is"
+ " not one of [True, False, 'noblock']",
+ logs,
+ )
+ self.assertIn(
+ "DEBUG: Skipping module named cc_resizefs, resizing disabled\n",
+ logs,
+ )
+
+ @mock.patch("cloudinit.config.cc_resizefs.util.get_mount_info")
+ def test_handle_warns_on_unknown_mount_info(self, m_get_mount_info):
+ """handle warns when get_mount_info sees unknown filesystem for /."""
+ m_get_mount_info.return_value = None
+ cfg = {"resize_rootfs": True}
+ handle("cc_resizefs", cfg, _cloud=None, log=LOG, args=[])
+ logs = self.logs.getvalue()
+ self.assertNotIn(
+ "WARNING: Invalid cloud-config provided:\nresize_rootfs:", logs
+ )
+ self.assertIn(
+ "WARNING: Could not determine filesystem type of /\n", logs
+ )
+ self.assertEqual(
+ [mock.call("/", LOG)], m_get_mount_info.call_args_list
+ )
+
+ def test_handle_warns_on_undiscoverable_root_path_in_commandline(self):
+ """handle noops when the root path is not found on the commandline."""
+ cfg = {"resize_rootfs": True}
+ exists_mock_path = "cloudinit.config.cc_resizefs.os.path.exists"
+
+ def fake_mount_info(path, log):
+ self.assertEqual("/", path)
+ self.assertEqual(LOG, log)
+ return ("/dev/root", "ext4", "/")
+
+ with mock.patch(exists_mock_path) as m_exists:
+ m_exists.return_value = False
+ wrap_and_call(
+ "cloudinit.config.cc_resizefs.util",
+ {
+ "is_container": {"return_value": False},
+ "get_mount_info": {"side_effect": fake_mount_info},
+ "get_cmdline": {"return_value": "BOOT_IMAGE=/vmlinuz.efi"},
+ },
+ handle,
+ "cc_resizefs",
+ cfg,
+ _cloud=None,
+ log=LOG,
+ args=[],
+ )
+ logs = self.logs.getvalue()
+ self.assertIn("WARNING: Unable to find device '/dev/root'", logs)
+
+ def test_resize_zfs_cmd_return(self):
+ zpool = "zroot"
+ devpth = "gpt/system"
+ self.assertEqual(
+ ("zpool", "online", "-e", zpool, devpth),
+ _resize_zfs(zpool, devpth),
+ )
+
+ def test_resize_xfs_cmd_return(self):
+ mount_point = "/mnt/test"
+ devpth = "/dev/sda1"
+ self.assertEqual(
+ ("xfs_growfs", mount_point), _resize_xfs(mount_point, devpth)
+ )
+
+ def test_resize_ext_cmd_return(self):
+ mount_point = "/"
+ devpth = "/dev/sdb1"
+ self.assertEqual(
+ ("resize2fs", devpth), _resize_ext(mount_point, devpth)
+ )
+
+ def test_resize_ufs_cmd_return(self):
+ mount_point = "/"
+ devpth = "/dev/sda2"
+ self.assertEqual(
+ ("growfs", "-y", mount_point), _resize_ufs(mount_point, devpth)
+ )
+
+ @mock.patch("cloudinit.util.is_container", return_value=False)
+ @mock.patch("cloudinit.util.parse_mount")
+ @mock.patch("cloudinit.util.get_device_info_from_zpool")
+ @mock.patch("cloudinit.util.get_mount_info")
+ def test_handle_zfs_root(
+ self, mount_info, zpool_info, parse_mount, is_container
+ ):
+ devpth = "vmzroot/ROOT/freebsd"
+ disk = "gpt/system"
+ fs_type = "zfs"
+ mount_point = "/"
+
+ mount_info.return_value = (devpth, fs_type, mount_point)
+ zpool_info.return_value = disk
+ parse_mount.return_value = (devpth, fs_type, mount_point)
+
+ cfg = {"resize_rootfs": True}
+
+ with mock.patch("cloudinit.config.cc_resizefs.do_resize") as dresize:
+ handle("cc_resizefs", cfg, _cloud=None, log=LOG, args=[])
+ ret = dresize.call_args[0][0]
+
+ self.assertEqual(("zpool", "online", "-e", "vmzroot", disk), ret)
+
+ @mock.patch("cloudinit.util.is_container", return_value=False)
+ @mock.patch("cloudinit.util.get_mount_info")
+ @mock.patch("cloudinit.util.get_device_info_from_zpool")
+ @mock.patch("cloudinit.util.parse_mount")
+ def test_handle_modern_zfsroot(
+ self, mount_info, zpool_info, parse_mount, is_container
+ ):
+ devpth = "zroot/ROOT/default"
+ disk = "da0p3"
+ fs_type = "zfs"
+ mount_point = "/"
+
+ mount_info.return_value = (devpth, fs_type, mount_point)
+ zpool_info.return_value = disk
+ parse_mount.return_value = (devpth, fs_type, mount_point)
+
+ cfg = {"resize_rootfs": True}
+
+ def fake_stat(devpath):
+ if devpath == disk:
+ raise OSError("not here")
+ FakeStat = namedtuple(
+ "FakeStat", ["st_mode", "st_size", "st_mtime"]
+ ) # minimal stat
+ return FakeStat(25008, 0, 1) # fake block device (S_IFBLK | 0o660)
+
+ with mock.patch("cloudinit.config.cc_resizefs.do_resize") as dresize:
+ with mock.patch("cloudinit.config.cc_resizefs.os.stat") as m_stat:
+ m_stat.side_effect = fake_stat
+ handle("cc_resizefs", cfg, _cloud=None, log=LOG, args=[])
+
+ self.assertEqual(
+ ("zpool", "online", "-e", "zroot", "/dev/" + disk),
+ dresize.call_args[0][0],
+ )
+
+
+class TestRootDevFromCmdline(CiTestCase):
+ def test_rootdev_from_cmdline_with_no_root(self):
+ """Return None from rootdev_from_cmdline when root is not present."""
+ invalid_cases = [
+ "BOOT_IMAGE=/adsf asdfa werasef root adf",
+ "BOOT_IMAGE=/adsf",
+ "",
+ ]
+ for case in invalid_cases:
+ self.assertIsNone(util.rootdev_from_cmdline(case))
+
+ def test_rootdev_from_cmdline_with_root_startswith_dev(self):
+ """Return the cmdline root when the path starts with /dev."""
+ self.assertEqual(
+ "/dev/this", util.rootdev_from_cmdline("asdf root=/dev/this")
+ )
+
+ def test_rootdev_from_cmdline_with_root_without_dev_prefix(self):
+ """Add /dev prefix to cmdline root when the path lacks the prefix."""
+ self.assertEqual(
+ "/dev/this", util.rootdev_from_cmdline("asdf root=this")
+ )
+
+ def test_rootdev_from_cmdline_with_root_with_label(self):
+ """When cmdline root contains a LABEL, our root is disk/by-label."""
+ self.assertEqual(
+ "/dev/disk/by-label/unique",
+ util.rootdev_from_cmdline("asdf root=LABEL=unique"),
+ )
+
+ def test_rootdev_from_cmdline_with_root_with_uuid(self):
+ """When cmdline root contains a UUID, our root is disk/by-uuid."""
+ self.assertEqual(
+ "/dev/disk/by-uuid/adsfdsaf-adsf",
+ util.rootdev_from_cmdline("asdf root=UUID=adsfdsaf-adsf"),
+ )
+
+
+class TestMaybeGetDevicePathAsWritableBlock(CiTestCase):
+
+ with_logs = True
+
+ def test_maybe_get_writable_device_path_none_on_overlayroot(self):
+ """When devpath is overlayroot (on MAAS), is_dev_writable is False."""
+ info = "does not matter"
+ devpath = wrap_and_call(
+ "cloudinit.config.cc_resizefs.util",
+ {"is_container": {"return_value": False}},
+ maybe_get_writable_device_path,
+ "overlayroot",
+ info,
+ LOG,
+ )
+ self.assertIsNone(devpath)
+ self.assertIn(
+ "Not attempting to resize devpath 'overlayroot'",
+ self.logs.getvalue(),
+ )
+
+ def test_maybe_get_writable_device_path_warns_missing_cmdline_root(self):
+ """When root does not exist isn't in the cmdline, log warning."""
+ info = "does not matter"
+
+ def fake_mount_info(path, log):
+ self.assertEqual("/", path)
+ self.assertEqual(LOG, log)
+ return ("/dev/root", "ext4", "/")
+
+ exists_mock_path = "cloudinit.config.cc_resizefs.os.path.exists"
+ with mock.patch(exists_mock_path) as m_exists:
+ m_exists.return_value = False
+ devpath = wrap_and_call(
+ "cloudinit.config.cc_resizefs.util",
+ {
+ "is_container": {"return_value": False},
+ "get_mount_info": {"side_effect": fake_mount_info},
+ "get_cmdline": {"return_value": "BOOT_IMAGE=/vmlinuz.efi"},
+ },
+ maybe_get_writable_device_path,
+ "/dev/root",
+ info,
+ LOG,
+ )
+ self.assertIsNone(devpath)
+ logs = self.logs.getvalue()
+ self.assertIn("WARNING: Unable to find device '/dev/root'", logs)
+
+ def test_maybe_get_writable_device_path_does_not_exist(self):
+ """When devpath does not exist, a warning is logged."""
+ info = "dev=/dev/I/dont/exist mnt_point=/ path=/dev/none"
+ devpath = wrap_and_call(
+ "cloudinit.config.cc_resizefs.util",
+ {"is_container": {"return_value": False}},
+ maybe_get_writable_device_path,
+ "/dev/I/dont/exist",
+ info,
+ LOG,
+ )
+ self.assertIsNone(devpath)
+ self.assertIn(
+ "WARNING: Device '/dev/I/dont/exist' did not exist."
+ " cannot resize: %s" % info,
+ self.logs.getvalue(),
+ )
+
+ def test_maybe_get_writable_device_path_does_not_exist_in_container(self):
+ """When devpath does not exist in a container, log a debug message."""
+ info = "dev=/dev/I/dont/exist mnt_point=/ path=/dev/none"
+ devpath = wrap_and_call(
+ "cloudinit.config.cc_resizefs.util",
+ {"is_container": {"return_value": True}},
+ maybe_get_writable_device_path,
+ "/dev/I/dont/exist",
+ info,
+ LOG,
+ )
+ self.assertIsNone(devpath)
+ self.assertIn(
+ "DEBUG: Device '/dev/I/dont/exist' did not exist in container."
+ " cannot resize: %s" % info,
+ self.logs.getvalue(),
+ )
+
+ def test_maybe_get_writable_device_path_raises_oserror(self):
+ """When unexpected OSError is raises by os.stat it is reraised."""
+ info = "dev=/dev/I/dont/exist mnt_point=/ path=/dev/none"
+ with self.assertRaises(OSError) as context_manager:
+ wrap_and_call(
+ "cloudinit.config.cc_resizefs",
+ {
+ "util.is_container": {"return_value": True},
+ "os.stat": {
+ "side_effect": OSError("Something unexpected")
+ },
+ },
+ maybe_get_writable_device_path,
+ "/dev/I/dont/exist",
+ info,
+ LOG,
+ )
+ self.assertEqual(
+ "Something unexpected", str(context_manager.exception)
+ )
+
+ def test_maybe_get_writable_device_path_non_block(self):
+ """When device is not a block device, emit warning return False."""
+ fake_devpath = self.tmp_path("dev/readwrite")
+ util.write_file(fake_devpath, "", mode=0o600) # read-write
+ info = "dev=/dev/root mnt_point=/ path={0}".format(fake_devpath)
+
+ devpath = wrap_and_call(
+ "cloudinit.config.cc_resizefs.util",
+ {"is_container": {"return_value": False}},
+ maybe_get_writable_device_path,
+ fake_devpath,
+ info,
+ LOG,
+ )
+ self.assertIsNone(devpath)
+ self.assertIn(
+ "WARNING: device '{0}' not a block device. cannot resize".format(
+ fake_devpath
+ ),
+ self.logs.getvalue(),
+ )
+
+ def test_maybe_get_writable_device_path_non_block_on_container(self):
+ """When device is non-block device in container, emit debug log."""
+ fake_devpath = self.tmp_path("dev/readwrite")
+ util.write_file(fake_devpath, "", mode=0o600) # read-write
+ info = "dev=/dev/root mnt_point=/ path={0}".format(fake_devpath)
+
+ devpath = wrap_and_call(
+ "cloudinit.config.cc_resizefs.util",
+ {"is_container": {"return_value": True}},
+ maybe_get_writable_device_path,
+ fake_devpath,
+ info,
+ LOG,
+ )
+ self.assertIsNone(devpath)
+ self.assertIn(
+ "DEBUG: device '{0}' not a block device in container."
+ " cannot resize".format(fake_devpath),
+ self.logs.getvalue(),
+ )
+
+ def test_maybe_get_writable_device_path_returns_cmdline_root(self):
+ """When root device is UUID in kernel commandline, update devpath."""
+ # XXX Long-term we want to use FilesystemMocking test to avoid
+ # touching os.stat.
+ FakeStat = namedtuple(
+ "FakeStat", ["st_mode", "st_size", "st_mtime"]
+ ) # minimal def.
+ info = "dev=/dev/root mnt_point=/ path=/does/not/matter"
+ devpath = wrap_and_call(
+ "cloudinit.config.cc_resizefs",
+ {
+ "util.get_cmdline": {"return_value": "asdf root=UUID=my-uuid"},
+ "util.is_container": False,
+ "os.path.exists": False, # /dev/root doesn't exist
+ "os.stat": {
+ "return_value": FakeStat(25008, 0, 1)
+ }, # block device (S_IFBLK | 0o660)
+ },
+ maybe_get_writable_device_path,
+ "/dev/root",
+ info,
+ LOG,
+ )
+ self.assertEqual("/dev/disk/by-uuid/my-uuid", devpath)
+ self.assertIn(
+ "DEBUG: Converted /dev/root to '/dev/disk/by-uuid/my-uuid'"
+ " per kernel cmdline",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.util.mount_is_read_write")
+ @mock.patch("cloudinit.config.cc_resizefs.os.path.isdir")
+ def test_resize_btrfs_mount_is_ro(self, m_is_dir, m_is_rw):
+ """Do not resize / directly if it is read-only. (LP: #1734787)."""
+ m_is_rw.return_value = False
+ m_is_dir.return_value = True
+ self.assertEqual(
+ ("btrfs", "filesystem", "resize", "max", "//.snapshots"),
+ _resize_btrfs("/", "/dev/sda1"),
+ )
+
+ @mock.patch("cloudinit.util.mount_is_read_write")
+ @mock.patch("cloudinit.config.cc_resizefs.os.path.isdir")
+ def test_resize_btrfs_mount_is_rw(self, m_is_dir, m_is_rw):
+ """Do not resize / directly if it is read-only. (LP: #1734787)."""
+ m_is_rw.return_value = True
+ m_is_dir.return_value = True
+ self.assertEqual(
+ ("btrfs", "filesystem", "resize", "max", "/"),
+ _resize_btrfs("/", "/dev/sda1"),
+ )
+
+ @mock.patch("cloudinit.util.is_container", return_value=True)
+ @mock.patch("cloudinit.util.is_FreeBSD")
+ def test_maybe_get_writable_device_path_zfs_freebsd(
+ self, freebsd, m_is_container
+ ):
+ freebsd.return_value = True
+ info = "dev=gpt/system mnt_point=/ path=/"
+ devpth = maybe_get_writable_device_path("gpt/system", info, LOG)
+ self.assertEqual("gpt/system", devpth)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_resizefs_vyos.py b/tests/unittests/config/test_cc_resizefs_vyos.py
new file mode 100644
index 00000000..c995e6aa
--- /dev/null
+++ b/tests/unittests/config/test_cc_resizefs_vyos.py
@@ -0,0 +1,490 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import logging
+from collections import namedtuple
+
+from cloudinit.config.cc_resizefs_vyos import (
+ _resize_btrfs,
+ _resize_ext,
+ _resize_ufs,
+ _resize_xfs,
+ _resize_zfs,
+ can_skip_resize,
+ handle,
+ maybe_get_writable_device_path,
+)
+from cloudinit.subp import ProcessExecutionError
+from tests.unittests.helpers import (
+ CiTestCase,
+ mock,
+ skipUnlessJsonSchema,
+ util,
+ wrap_and_call,
+)
+
+LOG = logging.getLogger(__name__)
+
+
+class TestResizefs(CiTestCase):
+ with_logs = True
+
+ def setUp(self):
+ super(TestResizefs, self).setUp()
+ self.name = "resizefs"
+
+ @mock.patch("cloudinit.subp.subp")
+ def test_skip_ufs_resize(self, m_subp):
+ fs_type = "ufs"
+ resize_what = "/"
+ devpth = "/dev/da0p2"
+ err = (
+ "growfs: requested size 2.0GB is not larger than the "
+ "current filesystem size 2.0GB\n"
+ )
+ exception = ProcessExecutionError(stderr=err, exit_code=1)
+ m_subp.side_effect = exception
+ res = can_skip_resize(fs_type, resize_what, devpth)
+ self.assertTrue(res)
+
+ @mock.patch("cloudinit.subp.subp")
+ def test_cannot_skip_ufs_resize(self, m_subp):
+ fs_type = "ufs"
+ resize_what = "/"
+ devpth = "/dev/da0p2"
+ m_subp.return_value = (
+ "stdout: super-block backups (for fsck_ffs -b #) at:\n\n",
+ "growfs: no room to allocate last cylinder group; "
+ "leaving 364KB unused\n",
+ )
+ res = can_skip_resize(fs_type, resize_what, devpth)
+ self.assertFalse(res)
+
+ @mock.patch("cloudinit.subp.subp")
+ def test_cannot_skip_ufs_growfs_exception(self, m_subp):
+ fs_type = "ufs"
+ resize_what = "/"
+ devpth = "/dev/da0p2"
+ err = "growfs: /dev/da0p2 is not clean - run fsck.\n"
+ exception = ProcessExecutionError(stderr=err, exit_code=1)
+ m_subp.side_effect = exception
+ with self.assertRaises(ProcessExecutionError):
+ can_skip_resize(fs_type, resize_what, devpth)
+
+ def test_can_skip_resize_ext(self):
+ self.assertFalse(can_skip_resize("ext", "/", "/dev/sda1"))
+
+ def test_handle_noops_on_disabled(self):
+ """The handle function logs when the configuration disables resize."""
+ cfg = {"resizefs_enabled": False}
+ handle("cc_resizefs_vyos", cfg, _cloud=None, log=LOG, args=[])
+ self.assertIn(
+ "DEBUG: Skipping module named cc_resizefs_vyos, resizing disabled\n",
+ self.logs.getvalue(),
+ )
+
+ @skipUnlessJsonSchema()
+ def test_handle_schema_validation_logs_invalid_resizefs_enabled_value(self):
+ """The handle reports json schema violations as a warning.
+
+ Invalid values for resizefs_enabled result in disabling the module.
+ """
+ cfg = {"resizefs_enabled": "junk"}
+ handle("cc_resizefs_vyos", cfg, _cloud=None, log=LOG, args=[])
+ logs = self.logs.getvalue()
+ self.assertIn(
+ "WARNING: Invalid cloud-config provided:\nresizefs_enabled: 'junk' is"
+ " not one of [True, False, 'noblock']",
+ logs,
+ )
+ self.assertIn(
+ "DEBUG: Skipping module named cc_resizefs_vyos, resizing disabled\n",
+ logs,
+ )
+
+ @mock.patch("cloudinit.config.cc_resizefs_vyos.util.get_mount_info")
+ def test_handle_warns_on_unknown_mount_info(self, m_get_mount_info):
+ """handle warns when get_mount_info sees unknown filesystem for /."""
+ m_get_mount_info.return_value = None
+ cfg = {"resizefs_enabled": True}
+ handle("cc_resizefs_vyos", cfg, _cloud=None, log=LOG, args=[])
+ logs = self.logs.getvalue()
+ self.assertNotIn(
+ "WARNING: Invalid cloud-config provided:\nresizefs_enabled:", logs
+ )
+ self.assertIn(
+ "WARNING: Could not determine filesystem type of /\n", logs
+ )
+ self.assertEqual(
+ [mock.call("/", LOG)], m_get_mount_info.call_args_list
+ )
+
+ def test_handle_warns_on_undiscoverable_root_path_in_commandline(self):
+ """handle noops when the root path is not found on the commandline."""
+ cfg = {"resizefs_enabled": True}
+ exists_mock_path = "cloudinit.config.cc_resizefs_vyos.os.path.exists"
+
+ def fake_mount_info(path, log):
+ self.assertEqual("/", path)
+ self.assertEqual(LOG, log)
+ return ("/dev/root", "ext4", "/")
+
+ with mock.patch(exists_mock_path) as m_exists:
+ m_exists.return_value = False
+ wrap_and_call(
+ "cloudinit.config.cc_resizefs_vyos.util",
+ {
+ "is_container": {"return_value": False},
+ "get_mount_info": {"side_effect": fake_mount_info},
+ "get_cmdline": {"return_value": "BOOT_IMAGE=/vmlinuz.efi"},
+ },
+ handle,
+ "cc_resizefs_vyos",
+ cfg,
+ _cloud=None,
+ log=LOG,
+ args=[],
+ )
+ logs = self.logs.getvalue()
+ self.assertIn("WARNING: Unable to find device '/dev/root'", logs)
+
+ def test_resize_zfs_cmd_return(self):
+ zpool = "zroot"
+ devpth = "gpt/system"
+ self.assertEqual(
+ ("zpool", "online", "-e", zpool, devpth),
+ _resize_zfs(zpool, devpth),
+ )
+
+ def test_resize_xfs_cmd_return(self):
+ mount_point = "/mnt/test"
+ devpth = "/dev/sda1"
+ self.assertEqual(
+ ("xfs_growfs", mount_point), _resize_xfs(mount_point, devpth)
+ )
+
+ def test_resize_ext_cmd_return(self):
+ mount_point = "/"
+ devpth = "/dev/sdb1"
+ self.assertEqual(
+ ("resize2fs", devpth), _resize_ext(mount_point, devpth)
+ )
+
+ def test_resize_ufs_cmd_return(self):
+ mount_point = "/"
+ devpth = "/dev/sda2"
+ self.assertEqual(
+ ("growfs", "-y", mount_point), _resize_ufs(mount_point, devpth)
+ )
+
+ @mock.patch("cloudinit.util.is_container", return_value=False)
+ @mock.patch("cloudinit.util.parse_mount")
+ @mock.patch("cloudinit.util.get_device_info_from_zpool")
+ @mock.patch("cloudinit.util.get_mount_info")
+ def test_handle_zfs_root(
+ self, mount_info, zpool_info, parse_mount, is_container
+ ):
+ devpth = "vmzroot/ROOT/freebsd"
+ disk = "gpt/system"
+ fs_type = "zfs"
+ mount_point = "/"
+
+ mount_info.return_value = (devpth, fs_type, mount_point)
+ zpool_info.return_value = disk
+ parse_mount.return_value = (devpth, fs_type, mount_point)
+
+ cfg = {"resizefs_enabled": True}
+
+ with mock.patch("cloudinit.config.cc_resizefs_vyos.do_resize") as dresize:
+ handle("cc_resizefs_vyos", cfg, _cloud=None, log=LOG, args=[])
+ ret = dresize.call_args[0][0]
+
+ self.assertEqual(("zpool", "online", "-e", "vmzroot", disk), ret)
+
+ @mock.patch("cloudinit.util.is_container", return_value=False)
+ @mock.patch("cloudinit.util.get_mount_info")
+ @mock.patch("cloudinit.util.get_device_info_from_zpool")
+ @mock.patch("cloudinit.util.parse_mount")
+ def test_handle_modern_zfsroot(
+ self, mount_info, zpool_info, parse_mount, is_container
+ ):
+ devpth = "zroot/ROOT/default"
+ disk = "da0p3"
+ fs_type = "zfs"
+ mount_point = "/"
+
+ mount_info.return_value = (devpth, fs_type, mount_point)
+ zpool_info.return_value = disk
+ parse_mount.return_value = (devpth, fs_type, mount_point)
+
+ cfg = {"resizefs_enabled": True}
+
+ def fake_stat(devpath):
+ if devpath == disk:
+ raise OSError("not here")
+ FakeStat = namedtuple(
+ "FakeStat", ["st_mode", "st_size", "st_mtime"]
+ ) # minimal stat
+ return FakeStat(25008, 0, 1) # fake block device (S_IFBLK | 0o660)
+
+ with mock.patch("cloudinit.config.cc_resizefs_vyos.do_resize") as dresize:
+ with mock.patch("cloudinit.config.cc_resizefs_vyos.os.stat") as m_stat:
+ m_stat.side_effect = fake_stat
+ handle("cc_resizefs_vyos", cfg, _cloud=None, log=LOG, args=[])
+
+ self.assertEqual(
+ ("zpool", "online", "-e", "zroot", "/dev/" + disk),
+ dresize.call_args[0][0],
+ )
+
+
+class TestRootDevFromCmdline(CiTestCase):
+ def test_rootdev_from_cmdline_with_no_root(self):
+ """Return None from rootdev_from_cmdline when root is not present."""
+ invalid_cases = [
+ "BOOT_IMAGE=/adsf asdfa werasef root adf",
+ "BOOT_IMAGE=/adsf",
+ "",
+ ]
+ for case in invalid_cases:
+ self.assertIsNone(util.rootdev_from_cmdline(case))
+
+ def test_rootdev_from_cmdline_with_root_startswith_dev(self):
+ """Return the cmdline root when the path starts with /dev."""
+ self.assertEqual(
+ "/dev/this", util.rootdev_from_cmdline("asdf root=/dev/this")
+ )
+
+ def test_rootdev_from_cmdline_with_root_without_dev_prefix(self):
+ """Add /dev prefix to cmdline root when the path lacks the prefix."""
+ self.assertEqual(
+ "/dev/this", util.rootdev_from_cmdline("asdf root=this")
+ )
+
+ def test_rootdev_from_cmdline_with_root_with_label(self):
+ """When cmdline root contains a LABEL, our root is disk/by-label."""
+ self.assertEqual(
+ "/dev/disk/by-label/unique",
+ util.rootdev_from_cmdline("asdf root=LABEL=unique"),
+ )
+
+ def test_rootdev_from_cmdline_with_root_with_uuid(self):
+ """When cmdline root contains a UUID, our root is disk/by-uuid."""
+ self.assertEqual(
+ "/dev/disk/by-uuid/adsfdsaf-adsf",
+ util.rootdev_from_cmdline("asdf root=UUID=adsfdsaf-adsf"),
+ )
+
+
+class TestMaybeGetDevicePathAsWritableBlock(CiTestCase):
+
+ with_logs = True
+
+ def test_maybe_get_writable_device_path_none_on_overlayroot(self):
+ """When devpath is overlayroot (on MAAS), is_dev_writable is False."""
+ info = "does not matter"
+ devpath = wrap_and_call(
+ "cloudinit.config.cc_resizefs_vyos.util",
+ {"is_container": {"return_value": False}},
+ maybe_get_writable_device_path,
+ "overlayroot",
+ info,
+ LOG,
+ )
+ self.assertIsNone(devpath)
+ self.assertIn(
+ "Not attempting to resize devpath 'overlayroot'",
+ self.logs.getvalue(),
+ )
+
+ def test_maybe_get_writable_device_path_warns_missing_cmdline_root(self):
+ """When root does not exist isn't in the cmdline, log warning."""
+ info = "does not matter"
+
+ def fake_mount_info(path, log):
+ self.assertEqual("/", path)
+ self.assertEqual(LOG, log)
+ return ("/dev/root", "ext4", "/")
+
+ exists_mock_path = "cloudinit.config.cc_resizefs_vyos.os.path.exists"
+ with mock.patch(exists_mock_path) as m_exists:
+ m_exists.return_value = False
+ devpath = wrap_and_call(
+ "cloudinit.config.cc_resizefs_vyos.util",
+ {
+ "is_container": {"return_value": False},
+ "get_mount_info": {"side_effect": fake_mount_info},
+ "get_cmdline": {"return_value": "BOOT_IMAGE=/vmlinuz.efi"},
+ },
+ maybe_get_writable_device_path,
+ "/dev/root",
+ info,
+ LOG,
+ )
+ self.assertIsNone(devpath)
+ logs = self.logs.getvalue()
+ self.assertIn("WARNING: Unable to find device '/dev/root'", logs)
+
+ def test_maybe_get_writable_device_path_does_not_exist(self):
+ """When devpath does not exist, a warning is logged."""
+ info = "dev=/dev/I/dont/exist mnt_point=/ path=/dev/none"
+ devpath = wrap_and_call(
+ "cloudinit.config.cc_resizefs_vyos.util",
+ {"is_container": {"return_value": False}},
+ maybe_get_writable_device_path,
+ "/dev/I/dont/exist",
+ info,
+ LOG,
+ )
+ self.assertIsNone(devpath)
+ self.assertIn(
+ "WARNING: Device '/dev/I/dont/exist' did not exist."
+ " cannot resize: %s" % info,
+ self.logs.getvalue(),
+ )
+
+ def test_maybe_get_writable_device_path_does_not_exist_in_container(self):
+ """When devpath does not exist in a container, log a debug message."""
+ info = "dev=/dev/I/dont/exist mnt_point=/ path=/dev/none"
+ devpath = wrap_and_call(
+ "cloudinit.config.cc_resizefs_vyos.util",
+ {"is_container": {"return_value": True}},
+ maybe_get_writable_device_path,
+ "/dev/I/dont/exist",
+ info,
+ LOG,
+ )
+ self.assertIsNone(devpath)
+ self.assertIn(
+ "DEBUG: Device '/dev/I/dont/exist' did not exist in container."
+ " cannot resize: %s" % info,
+ self.logs.getvalue(),
+ )
+
+ def test_maybe_get_writable_device_path_raises_oserror(self):
+ """When unexpected OSError is raises by os.stat it is reraised."""
+ info = "dev=/dev/I/dont/exist mnt_point=/ path=/dev/none"
+ with self.assertRaises(OSError) as context_manager:
+ wrap_and_call(
+ "cloudinit.config.cc_resizefs_vyos",
+ {
+ "util.is_container": {"return_value": True},
+ "os.stat": {
+ "side_effect": OSError("Something unexpected")
+ },
+ },
+ maybe_get_writable_device_path,
+ "/dev/I/dont/exist",
+ info,
+ LOG,
+ )
+ self.assertEqual(
+ "Something unexpected", str(context_manager.exception)
+ )
+
+ def test_maybe_get_writable_device_path_non_block(self):
+ """When device is not a block device, emit warning return False."""
+ fake_devpath = self.tmp_path("dev/readwrite")
+ util.write_file(fake_devpath, "", mode=0o600) # read-write
+ info = "dev=/dev/root mnt_point=/ path={0}".format(fake_devpath)
+
+ devpath = wrap_and_call(
+ "cloudinit.config.cc_resizefs_vyos.util",
+ {"is_container": {"return_value": False}},
+ maybe_get_writable_device_path,
+ fake_devpath,
+ info,
+ LOG,
+ )
+ self.assertIsNone(devpath)
+ self.assertIn(
+ "WARNING: device '{0}' not a block device. cannot resize".format(
+ fake_devpath
+ ),
+ self.logs.getvalue(),
+ )
+
+ def test_maybe_get_writable_device_path_non_block_on_container(self):
+ """When device is non-block device in container, emit debug log."""
+ fake_devpath = self.tmp_path("dev/readwrite")
+ util.write_file(fake_devpath, "", mode=0o600) # read-write
+ info = "dev=/dev/root mnt_point=/ path={0}".format(fake_devpath)
+
+ devpath = wrap_and_call(
+ "cloudinit.config.cc_resizefs_vyos.util",
+ {"is_container": {"return_value": True}},
+ maybe_get_writable_device_path,
+ fake_devpath,
+ info,
+ LOG,
+ )
+ self.assertIsNone(devpath)
+ self.assertIn(
+ "DEBUG: device '{0}' not a block device in container."
+ " cannot resize".format(fake_devpath),
+ self.logs.getvalue(),
+ )
+
+ def test_maybe_get_writable_device_path_returns_cmdline_root(self):
+ """When root device is UUID in kernel commandline, update devpath."""
+ # XXX Long-term we want to use FilesystemMocking test to avoid
+ # touching os.stat.
+ FakeStat = namedtuple(
+ "FakeStat", ["st_mode", "st_size", "st_mtime"]
+ ) # minimal def.
+ info = "dev=/dev/root mnt_point=/ path=/does/not/matter"
+ devpath = wrap_and_call(
+ "cloudinit.config.cc_resizefs_vyos",
+ {
+ "util.get_cmdline": {"return_value": "asdf root=UUID=my-uuid"},
+ "util.is_container": False,
+ "os.path.exists": False, # /dev/root doesn't exist
+ "os.stat": {
+ "return_value": FakeStat(25008, 0, 1)
+ }, # block device (S_IFBLK | 0o660)
+ },
+ maybe_get_writable_device_path,
+ "/dev/root",
+ info,
+ LOG,
+ )
+ self.assertEqual("/dev/disk/by-uuid/my-uuid", devpath)
+ self.assertIn(
+ "DEBUG: Converted /dev/root to '/dev/disk/by-uuid/my-uuid'"
+ " per kernel cmdline",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.util.mount_is_read_write")
+ @mock.patch("cloudinit.config.cc_resizefs_vyos.os.path.isdir")
+ def test_resize_btrfs_mount_is_ro(self, m_is_dir, m_is_rw):
+ """Do not resize / directly if it is read-only. (LP: #1734787)."""
+ m_is_rw.return_value = False
+ m_is_dir.return_value = True
+ self.assertEqual(
+ ("btrfs", "filesystem", "resize", "max", "//.snapshots"),
+ _resize_btrfs("/", "/dev/sda1"),
+ )
+
+ @mock.patch("cloudinit.util.mount_is_read_write")
+ @mock.patch("cloudinit.config.cc_resizefs_vyos.os.path.isdir")
+ def test_resize_btrfs_mount_is_rw(self, m_is_dir, m_is_rw):
+ """Do not resize / directly if it is read-only. (LP: #1734787)."""
+ m_is_rw.return_value = True
+ m_is_dir.return_value = True
+ self.assertEqual(
+ ("btrfs", "filesystem", "resize", "max", "/"),
+ _resize_btrfs("/", "/dev/sda1"),
+ )
+
+ @mock.patch("cloudinit.util.is_container", return_value=True)
+ @mock.patch("cloudinit.util.is_FreeBSD")
+ def test_maybe_get_writable_device_path_zfs_freebsd(
+ self, freebsd, m_is_container
+ ):
+ freebsd.return_value = True
+ info = "dev=gpt/system mnt_point=/ path=/"
+ devpth = maybe_get_writable_device_path("gpt/system", info, LOG)
+ self.assertEqual("gpt/system", devpth)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_resolv_conf.py b/tests/unittests/config/test_cc_resolv_conf.py
new file mode 100644
index 00000000..8896a4e8
--- /dev/null
+++ b/tests/unittests/config/test_cc_resolv_conf.py
@@ -0,0 +1,197 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import logging
+import os
+import shutil
+import tempfile
+from copy import deepcopy
+from unittest import mock
+
+import pytest
+
+from cloudinit import cloud, distros, helpers, util
+from cloudinit.config import cc_resolv_conf
+from cloudinit.config.cc_resolv_conf import generate_resolv_conf
+from tests.unittests import helpers as t_help
+from tests.unittests.util import MockDistro
+
+LOG = logging.getLogger(__name__)
+EXPECTED_HEADER = """\
+# Your system has been configured with 'manage-resolv-conf' set to true.
+# As a result, cloud-init has written this file with configuration data
+# that it has been provided. Cloud-init, by default, will write this file
+# a single time (PER_ONCE).
+#\n\n"""
+
+
+class TestResolvConf(t_help.FilesystemMockingTestCase):
+ with_logs = True
+ cfg = {"manage_resolv_conf": True, "resolv_conf": {}}
+
+ def setUp(self):
+ super(TestResolvConf, self).setUp()
+ self.tmp = tempfile.mkdtemp()
+ util.ensure_dir(os.path.join(self.tmp, "data"))
+ self.addCleanup(shutil.rmtree, self.tmp)
+
+ def _fetch_distro(self, kind, conf=None):
+ cls = distros.fetch(kind)
+ paths = helpers.Paths({"cloud_dir": self.tmp})
+ conf = {} if conf is None else conf
+ return cls(kind, conf, paths)
+
+ def call_resolv_conf_handler(self, distro_name, conf, cc=None):
+ if not cc:
+ ds = None
+ distro = self._fetch_distro(distro_name, conf)
+ paths = helpers.Paths({"cloud_dir": self.tmp})
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ cc_resolv_conf.handle("cc_resolv_conf", conf, cc, LOG, [])
+
+ @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
+ def test_resolv_conf_systemd_resolved(self, m_render_to_file):
+ self.call_resolv_conf_handler("photon", self.cfg)
+
+ assert [
+ mock.call(mock.ANY, "/etc/systemd/resolved.conf", mock.ANY)
+ ] == m_render_to_file.call_args_list
+
+ @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
+ def test_resolv_conf_no_param(self, m_render_to_file):
+ tmp = deepcopy(self.cfg)
+ self.logs.truncate(0)
+ tmp.pop("resolv_conf")
+ self.call_resolv_conf_handler("photon", tmp)
+
+ self.assertIn(
+ "manage_resolv_conf True but no parameters provided",
+ self.logs.getvalue(),
+ )
+ assert [
+ mock.call(mock.ANY, "/etc/systemd/resolved.conf", mock.ANY)
+ ] not in m_render_to_file.call_args_list
+
+ @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
+ def test_resolv_conf_manage_resolv_conf_false(self, m_render_to_file):
+ tmp = deepcopy(self.cfg)
+ self.logs.truncate(0)
+ tmp["manage_resolv_conf"] = False
+ self.call_resolv_conf_handler("photon", tmp)
+ self.assertIn(
+ "'manage_resolv_conf' present but set to False",
+ self.logs.getvalue(),
+ )
+ assert [
+ mock.call(mock.ANY, "/etc/systemd/resolved.conf", mock.ANY)
+ ] not in m_render_to_file.call_args_list
+
+ @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
+ def test_resolv_conf_etc_resolv_conf(self, m_render_to_file):
+ self.call_resolv_conf_handler("rhel", self.cfg)
+
+ assert [
+ mock.call(mock.ANY, "/etc/resolv.conf", mock.ANY)
+ ] == m_render_to_file.call_args_list
+
+ @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
+ def test_resolv_conf_invalid_resolve_conf_fn(self, m_render_to_file):
+ ds = None
+ distro = self._fetch_distro("rhel", self.cfg)
+ paths = helpers.Paths({"cloud_dir": self.tmp})
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ cc.distro.resolve_conf_fn = "bla"
+
+ self.logs.truncate(0)
+ self.call_resolv_conf_handler("rhel", self.cfg, cc)
+
+ self.assertIn(
+ "No template found, not rendering resolve configs",
+ self.logs.getvalue(),
+ )
+
+ assert [
+ mock.call(mock.ANY, "/etc/resolv.conf", mock.ANY)
+ ] not in m_render_to_file.call_args_list
+
+
+class TestGenerateResolvConf:
+
+ dist = MockDistro()
+ tmpl_fn = t_help.cloud_init_project_dir("templates/resolv.conf.tmpl")
+
+ @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
+ def test_dist_resolv_conf_fn(self, m_render_to_file):
+ self.dist.resolve_conf_fn = "/tmp/resolv-test.conf"
+ generate_resolv_conf(
+ self.tmpl_fn, mock.MagicMock(), self.dist.resolve_conf_fn
+ )
+
+ assert [
+ mock.call(mock.ANY, self.dist.resolve_conf_fn, mock.ANY)
+ ] == m_render_to_file.call_args_list
+
+ @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
+ def test_target_fname_is_used_if_passed(self, m_render_to_file):
+ path = "/use/this/path"
+ generate_resolv_conf(self.tmpl_fn, mock.MagicMock(), path)
+
+ assert [
+ mock.call(mock.ANY, path, mock.ANY)
+ ] == m_render_to_file.call_args_list
+
+ # Patch in templater so we can assert on the actual generated content
+ @mock.patch("cloudinit.templater.util.write_file")
+ # Parameterise with the value to be passed to generate_resolv_conf as the
+ # params parameter, and the expected line after the header as
+ # expected_extra_line.
+ @pytest.mark.parametrize(
+ "params,expected_extra_line",
+ [
+ # No options
+ ({}, None),
+ # Just a true flag
+ ({"options": {"foo": True}}, "options foo"),
+ # Just a false flag
+ ({"options": {"foo": False}}, None),
+ # Just an option
+ ({"options": {"foo": "some_value"}}, "options foo:some_value"),
+ # A true flag and an option
+ (
+ {"options": {"foo": "some_value", "bar": True}},
+ "options bar foo:some_value",
+ ),
+ # Two options
+ (
+ {"options": {"foo": "some_value", "bar": "other_value"}},
+ "options bar:other_value foo:some_value",
+ ),
+ # Everything
+ (
+ {
+ "options": {
+ "foo": "some_value",
+ "bar": "other_value",
+ "baz": False,
+ "spam": True,
+ }
+ },
+ "options spam bar:other_value foo:some_value",
+ ),
+ ],
+ )
+ def test_flags_and_options(
+ self, m_write_file, params, expected_extra_line
+ ):
+ target_fn = "/etc/resolv.conf"
+ generate_resolv_conf(self.tmpl_fn, params, target_fn)
+
+ expected_content = EXPECTED_HEADER
+ if expected_extra_line is not None:
+ # If we have any extra lines, expect a trailing newline
+ expected_content += "\n".join([expected_extra_line, ""])
+ assert [
+ mock.call(mock.ANY, expected_content, mode=mock.ANY)
+ ] == m_write_file.call_args_list
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_rh_subscription.py b/tests/unittests/config/test_cc_rh_subscription.py
new file mode 100644
index 00000000..fcc7db34
--- /dev/null
+++ b/tests/unittests/config/test_cc_rh_subscription.py
@@ -0,0 +1,320 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Tests for registering RHEL subscription via rh_subscription."""
+
+import copy
+import logging
+
+from cloudinit import subp
+from cloudinit.config import cc_rh_subscription
+from tests.unittests.helpers import CiTestCase, mock
+
+SUBMGR = cc_rh_subscription.SubscriptionManager
+SUB_MAN_CLI = "cloudinit.config.cc_rh_subscription._sub_man_cli"
+
+
+@mock.patch(SUB_MAN_CLI)
+class GoodTests(CiTestCase):
+ with_logs = True
+
+ def setUp(self):
+ super(GoodTests, self).setUp()
+ self.name = "cc_rh_subscription"
+ self.cloud_init = None
+ self.log = logging.getLogger("good_tests")
+ self.args = []
+ self.handle = cc_rh_subscription.handle
+
+ self.config = {
+ "rh_subscription": {
+ "username": "scooby@do.com",
+ "password": "scooby-snacks",
+ }
+ }
+ self.config_full = {
+ "rh_subscription": {
+ "username": "scooby@do.com",
+ "password": "scooby-snacks",
+ "auto-attach": True,
+ "service-level": "self-support",
+ "add-pool": ["pool1", "pool2", "pool3"],
+ "enable-repo": ["repo1", "repo2", "repo3"],
+ "disable-repo": ["repo4", "repo5"],
+ }
+ }
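+ # For reference, these dicts are the parsed form of user cloud-config
+ # such as:
+ #   rh_subscription:
+ #     username: scooby@do.com
+ #     auto-attach: true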
+
+ def test_already_registered(self, m_sman_cli):
+ """
+ Emulates a system that is already registered. Ensure it gets
+ a non-ProcessExecution error from is_registered()
+ """
+ self.handle(
+ self.name, self.config, self.cloud_init, self.log, self.args
+ )
+ self.assertEqual(m_sman_cli.call_count, 1)
+ self.assertIn("System is already registered", self.logs.getvalue())
+
+ def test_simple_registration(self, m_sman_cli):
+ """
+ Simple registration with username and password
+ """
+ reg = (
+ "The system has been registered with ID:"
+ " 12345678-abde-abcde-1234-1234567890abc"
+ )
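+ # The first _sub_man_cli call ("identity") raises ProcessExecutionError
+ # to signal an unregistered system; the second returns the register output.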
+ m_sman_cli.side_effect = [subp.ProcessExecutionError, (reg, "bar")]
+ self.handle(
+ self.name, self.config, self.cloud_init, self.log, self.args
+ )
+ self.assertIn(mock.call(["identity"]), m_sman_cli.call_args_list)
+ self.assertIn(
+ mock.call(
+ [
+ "register",
+ "--username=scooby@do.com",
+ "--password=scooby-snacks",
+ ],
+ logstring_val=True,
+ ),
+ m_sman_cli.call_args_list,
+ )
+ self.assertIn(
+ "rh_subscription plugin completed successfully",
+ self.logs.getvalue(),
+ )
+ self.assertEqual(m_sman_cli.call_count, 2)
+
+ @mock.patch.object(cc_rh_subscription.SubscriptionManager, "_getRepos")
+ def test_update_repos_disable_with_none(self, m_get_repos, m_sman_cli):
+ cfg = copy.deepcopy(self.config)
+ m_get_repos.return_value = ([], ["repo1"])
+ cfg["rh_subscription"].update(
+ {"enable-repo": ["repo1"], "disable-repo": None}
+ )
+ mysm = cc_rh_subscription.SubscriptionManager(cfg)
+ self.assertEqual(True, mysm.update_repos())
+ m_get_repos.assert_called_with()
+ self.assertEqual(
+ m_sman_cli.call_args_list, [mock.call(["repos", "--enable=repo1"])]
+ )
+
+ def test_full_registration(self, m_sman_cli):
+ """
+ Registration with auto-attach, service-level, adding pools,
+ and enabling and disabling yum repos
+ """
+ call_lists = []
+ call_lists.append(["attach", "--pool=pool1", "--pool=pool3"])
+ call_lists.append(
+ ["repos", "--disable=repo5", "--enable=repo2", "--enable=repo3"]
+ )
+ call_lists.append(["attach", "--auto", "--servicelevel=self-support"])
+ reg = (
+ "The system has been registered with ID:"
+ " 12345678-abde-abcde-1234-1234567890abc"
+ )
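+ # side_effect entries are consumed in order by successive _sub_man_cli
+ # calls: identity (unregistered), register, service-level, then the
+ # pool and repo queries whose results drive the attach/repos commands
+ # asserted below.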
+ m_sman_cli.side_effect = [
+ subp.ProcessExecutionError,
+ (reg, "bar"),
+ ("Service level set to: self-support", ""),
+ ("pool1\npool3\n", ""),
+ ("pool2\n", ""),
+ ("", ""),
+ ("Repo ID: repo1\nRepo ID: repo5\n", ""),
+ ("Repo ID: repo2\nRepo ID: repo3\nRepo ID: repo4", ""),
+ ("", ""),
+ ]
+ self.handle(
+ self.name, self.config_full, self.cloud_init, self.log, self.args
+ )
+ self.assertEqual(m_sman_cli.call_count, 9)
+ for call in call_lists:
+ self.assertIn(mock.call(call), m_sman_cli.call_args_list)
+ self.assertIn(
+ "rh_subscription plugin completed successfully",
+ self.logs.getvalue(),
+ )
+
+
+@mock.patch(SUB_MAN_CLI)
+class TestBadInput(CiTestCase):
+ with_logs = True
+ name = "cc_rh_subscription"
+ cloud_init = None
+ log = logging.getLogger("bad_tests")
+ args = []
+ SM = cc_rh_subscription.SubscriptionManager
+ reg = (
+ "The system has been registered with ID:"
+ " 12345678-abde-abcde-1234-1234567890abc"
+ )
+
+ config_no_password = {"rh_subscription": {"username": "scooby@do.com"}}
+
+ config_no_key = {
+ "rh_subscription": {
+ "activation-key": "1234abcde",
+ }
+ }
+
+ config_service = {
+ "rh_subscription": {
+ "username": "scooby@do.com",
+ "password": "scooby-snacks",
+ "service-level": "self-support",
+ }
+ }
+
+ config_badpool = {
+ "rh_subscription": {
+ "username": "scooby@do.com",
+ "password": "scooby-snacks",
+ "add-pool": "not_a_list",
+ }
+ }
+ config_badrepo = {
+ "rh_subscription": {
+ "username": "scooby@do.com",
+ "password": "scooby-snacks",
+ "enable-repo": "not_a_list",
+ }
+ }
+ config_badkey = {
+ "rh_subscription": {
+ "activation-key": "abcdef1234",
+ "fookey": "bar",
+ "org": "123",
+ }
+ }
+
+ def setUp(self):
+ super(TestBadInput, self).setUp()
+ self.handle = cc_rh_subscription.handle
+
+ def assert_logged_warnings(self, warnings):
+ logs = self.logs.getvalue()
+ missing = [w for w in warnings if "WARNING: " + w not in logs]
+ self.assertEqual([], missing, "Missing expected warnings.")
+
+ def test_no_password(self, m_sman_cli):
+ """Attempt to register without the password key/value."""
+ m_sman_cli.side_effect = [
+ subp.ProcessExecutionError,
+ (self.reg, "bar"),
+ ]
+ self.handle(
+ self.name,
+ self.config_no_password,
+ self.cloud_init,
+ self.log,
+ self.args,
+ )
+ self.assertEqual(m_sman_cli.call_count, 0)
+
+ def test_no_org(self, m_sman_cli):
+ """Attempt to register without the org key/value."""
+ m_sman_cli.side_effect = [subp.ProcessExecutionError]
+ self.handle(
+ self.name, self.config_no_key, self.cloud_init, self.log, self.args
+ )
+ m_sman_cli.assert_called_with(["identity"])
+ self.assertEqual(m_sman_cli.call_count, 1)
+ self.assert_logged_warnings(
+ (
+ "Unable to register system due to incomplete information.",
+ "Use either activationkey and org *or* userid and password",
+ "Registration failed or did not run completely",
+ "rh_subscription plugin did not complete successfully",
+ )
+ )
+
+ def test_service_level_without_auto(self, m_sman_cli):
+ """Attempt to register using service-level without auto-attach key."""
+ m_sman_cli.side_effect = [
+ subp.ProcessExecutionError,
+ (self.reg, "bar"),
+ ]
+ self.handle(
+ self.name,
+ self.config_service,
+ self.cloud_init,
+ self.log,
+ self.args,
+ )
+ self.assertEqual(m_sman_cli.call_count, 1)
+ self.assert_logged_warnings(
+ (
+ "The service-level key must be used in conjunction with ",
+ "rh_subscription plugin did not complete successfully",
+ )
+ )
+
+ def test_pool_not_a_list(self, m_sman_cli):
+ """
+ Register with pools that are not in the format of a list
+ """
+ m_sman_cli.side_effect = [
+ subp.ProcessExecutionError,
+ (self.reg, "bar"),
+ ]
+ self.handle(
+ self.name,
+ self.config_badpool,
+ self.cloud_init,
+ self.log,
+ self.args,
+ )
+ self.assertEqual(m_sman_cli.call_count, 2)
+ self.assert_logged_warnings(
+ (
+ "Pools must in the format of a list",
+ "rh_subscription plugin did not complete successfully",
+ )
+ )
+
+ def test_repo_not_a_list(self, m_sman_cli):
+ """
+ Register with repos that are not in the format of a list
+ """
+ m_sman_cli.side_effect = [
+ subp.ProcessExecutionError,
+ (self.reg, "bar"),
+ ]
+ self.handle(
+ self.name,
+ self.config_badrepo,
+ self.cloud_init,
+ self.log,
+ self.args,
+ )
+ self.assertEqual(m_sman_cli.call_count, 2)
+ self.assert_logged_warnings(
+ (
+ "Repo IDs must in the format of a list.",
+ "Unable to add or remove repos",
+ "rh_subscription plugin did not complete successfully",
+ )
+ )
+
+ def test_bad_key_value(self, m_sman_cli):
+ """
+ Attempt to register with a key that we don't know
+ """
+ m_sman_cli.side_effect = [
+ subp.ProcessExecutionError,
+ (self.reg, "bar"),
+ ]
+ self.handle(
+ self.name, self.config_badkey, self.cloud_init, self.log, self.args
+ )
+ self.assertEqual(m_sman_cli.call_count, 1)
+ self.assert_logged_warnings(
+ (
+ "fookey is not a valid key for rh_subscription. Valid keys"
+ " are:",
+ "rh_subscription plugin did not complete successfully",
+ )
+ )
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_rsyslog.py b/tests/unittests/config/test_cc_rsyslog.py
index 8c8e2838..e5d06ca2 100644
--- a/tests/unittests/test_handler/test_handler_rsyslog.py
+++ b/tests/unittests/config/test_cc_rsyslog.py
@@ -4,55 +4,63 @@ import os
import shutil
import tempfile
-from cloudinit.config.cc_rsyslog import (
- apply_rsyslog_changes, DEF_DIR, DEF_FILENAME, DEF_RELOAD, load_config,
- parse_remotes_line, remotes_to_rsyslog_cfg)
from cloudinit import util
-
-from cloudinit.tests import helpers as t_help
+from cloudinit.config.cc_rsyslog import (
+ DEF_DIR,
+ DEF_FILENAME,
+ DEF_RELOAD,
+ apply_rsyslog_changes,
+ load_config,
+ parse_remotes_line,
+ remotes_to_rsyslog_cfg,
+)
+from tests.unittests import helpers as t_help
class TestLoadConfig(t_help.TestCase):
def setUp(self):
super(TestLoadConfig, self).setUp()
self.basecfg = {
- 'config_filename': DEF_FILENAME,
- 'config_dir': DEF_DIR,
- 'service_reload_command': DEF_RELOAD,
- 'configs': [],
- 'remotes': {},
+ "config_filename": DEF_FILENAME,
+ "config_dir": DEF_DIR,
+ "service_reload_command": DEF_RELOAD,
+ "configs": [],
+ "remotes": {},
}
def test_legacy_full(self):
- found = load_config({
- 'rsyslog': ['*.* @192.168.1.1'],
- 'rsyslog_dir': "mydir",
- 'rsyslog_filename': "myfilename"})
- self.basecfg.update({
- 'configs': ['*.* @192.168.1.1'],
- 'config_dir': "mydir",
- 'config_filename': 'myfilename',
- 'service_reload_command': 'auto'}
+ found = load_config(
+ {
+ "rsyslog": ["*.* @192.168.1.1"],
+ "rsyslog_dir": "mydir",
+ "rsyslog_filename": "myfilename",
+ }
+ )
+ self.basecfg.update(
+ {
+ "configs": ["*.* @192.168.1.1"],
+ "config_dir": "mydir",
+ "config_filename": "myfilename",
+ "service_reload_command": "auto",
+ }
)
self.assertEqual(found, self.basecfg)
def test_legacy_defaults(self):
- found = load_config({
- 'rsyslog': ['*.* @192.168.1.1']})
- self.basecfg.update({
- 'configs': ['*.* @192.168.1.1']})
+ found = load_config({"rsyslog": ["*.* @192.168.1.1"]})
+ self.basecfg.update({"configs": ["*.* @192.168.1.1"]})
self.assertEqual(found, self.basecfg)
def test_new_defaults(self):
self.assertEqual(load_config({}), self.basecfg)
def test_new_configs(self):
- cfgs = ['*.* myhost', '*.* my2host']
- self.basecfg.update({'configs': cfgs})
+ cfgs = ["*.* myhost", "*.* my2host"]
+ self.basecfg.update({"configs": cfgs})
self.assertEqual(
- load_config({'rsyslog': {'configs': cfgs}}),
- self.basecfg)
+ load_config({"rsyslog": {"configs": cfgs}}), self.basecfg
+ )
class TestApplyChanges(t_help.TestCase):
@@ -63,27 +71,29 @@ class TestApplyChanges(t_help.TestCase):
def test_simple(self):
cfgline = "*.* foohost"
changed = apply_rsyslog_changes(
- configs=[cfgline], def_fname="foo.cfg", cfg_dir=self.tmp)
+ configs=[cfgline], def_fname="foo.cfg", cfg_dir=self.tmp
+ )
fname = os.path.join(self.tmp, "foo.cfg")
self.assertEqual([fname], changed)
- self.assertEqual(
- util.load_file(fname), cfgline + "\n")
+ self.assertEqual(util.load_file(fname), cfgline + "\n")
def test_multiple_files(self):
configs = [
- '*.* foohost',
- {'content': 'abc', 'filename': 'my.cfg'},
- {'content': 'filefoo-content',
- 'filename': os.path.join(self.tmp, 'mydir/mycfg')},
+ "*.* foohost",
+ {"content": "abc", "filename": "my.cfg"},
+ {
+ "content": "filefoo-content",
+ "filename": os.path.join(self.tmp, "mydir/mycfg"),
+ },
]
changed = apply_rsyslog_changes(
- configs=configs, def_fname="default.cfg", cfg_dir=self.tmp)
+ configs=configs, def_fname="default.cfg", cfg_dir=self.tmp
+ )
expected = [
- (os.path.join(self.tmp, "default.cfg"),
- "*.* foohost\n"),
+ (os.path.join(self.tmp, "default.cfg"), "*.* foohost\n"),
(os.path.join(self.tmp, "my.cfg"), "abc\n"),
(os.path.join(self.tmp, "mydir/mycfg"), "filefoo-content\n"),
]
@@ -91,30 +101,37 @@ class TestApplyChanges(t_help.TestCase):
actual = []
for fname, _content in expected:
util.load_file(fname)
- actual.append((fname, util.load_file(fname),))
+ actual.append(
+ (
+ fname,
+ util.load_file(fname),
+ )
+ )
self.assertEqual(expected, actual)
def test_repeat_def(self):
- configs = ['*.* foohost', "*.warn otherhost"]
+ configs = ["*.* foohost", "*.warn otherhost"]
changed = apply_rsyslog_changes(
- configs=configs, def_fname="default.cfg", cfg_dir=self.tmp)
+ configs=configs, def_fname="default.cfg", cfg_dir=self.tmp
+ )
fname = os.path.join(self.tmp, "default.cfg")
self.assertEqual([fname], changed)
- expected_content = '\n'.join([c for c in configs]) + '\n'
+ expected_content = "\n".join([c for c in configs]) + "\n"
found_content = util.load_file(fname)
self.assertEqual(expected_content, found_content)
def test_multiline_content(self):
- configs = ['line1', 'line2\nline3\n']
+ configs = ["line1", "line2\nline3\n"]
apply_rsyslog_changes(
- configs=configs, def_fname="default.cfg", cfg_dir=self.tmp)
+ configs=configs, def_fname="default.cfg", cfg_dir=self.tmp
+ )
fname = os.path.join(self.tmp, "default.cfg")
- expected_content = '\n'.join([c for c in configs])
+ expected_content = "\n".join([c for c in configs])
found_content = util.load_file(fname)
self.assertEqual(expected_content, found_content)
@@ -152,7 +169,7 @@ class TestRemotesToSyslog(t_help.TestCase):
 # str rendered line must appear in remotes_to_rsyslog_cfg return
mycfg = "*.* myhost"
myline = str(parse_remotes_line(mycfg, name="myname"))
- r = remotes_to_rsyslog_cfg({'myname': mycfg})
+ r = remotes_to_rsyslog_cfg({"myname": mycfg})
lines = r.splitlines()
self.assertEqual(1, len(lines))
self.assertTrue(myline in r.splitlines())
@@ -161,7 +178,8 @@ class TestRemotesToSyslog(t_help.TestCase):
header = "#foo head"
footer = "#foo foot"
r = remotes_to_rsyslog_cfg(
- {'myname': "*.* myhost"}, header=header, footer=footer)
+ {"myname": "*.* myhost"}, header=header, footer=footer
+ )
lines = r.splitlines()
self.assertTrue(header, lines[0])
self.assertTrue(footer, lines[-1])
@@ -170,9 +188,11 @@ class TestRemotesToSyslog(t_help.TestCase):
mycfg = "*.* myhost"
myline = str(parse_remotes_line(mycfg, name="myname"))
r = remotes_to_rsyslog_cfg(
- {'myname': mycfg, 'removed': None, 'removed2': ""})
+ {"myname": mycfg, "removed": None, "removed2": ""}
+ )
lines = r.splitlines()
self.assertEqual(1, len(lines))
self.assertTrue(myline in r.splitlines())
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_runcmd.py b/tests/unittests/config/test_cc_runcmd.py
new file mode 100644
index 00000000..59490d67
--- /dev/null
+++ b/tests/unittests/config/test_cc_runcmd.py
@@ -0,0 +1,137 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+import logging
+import os
+import stat
+from unittest.mock import patch
+
+from cloudinit import helpers, subp, util
+from cloudinit.config.cc_runcmd import handle, schema
+from tests.unittests.helpers import (
+ CiTestCase,
+ FilesystemMockingTestCase,
+ SchemaTestCaseMixin,
+ skipUnlessJsonSchema,
+)
+from tests.unittests.util import get_cloud
+
+LOG = logging.getLogger(__name__)
+
+
+class TestRuncmd(FilesystemMockingTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestRuncmd, self).setUp()
+ self.subp = subp.subp
+ self.new_root = self.tmp_dir()
+ self.patchUtils(self.new_root)
+ self.paths = helpers.Paths({"scripts": self.new_root})
+
+ def test_handler_skip_if_no_runcmd(self):
+ """When the provided config doesn't contain runcmd, skip it."""
+ cfg = {}
+ mycloud = get_cloud(paths=self.paths)
+ handle("notimportant", cfg, mycloud, LOG, None)
+ self.assertIn(
+ "Skipping module named notimportant, no 'runcmd' key",
+ self.logs.getvalue(),
+ )
+
+ @patch("cloudinit.util.shellify")
+ def test_runcmd_shellify_fails(self, cls):
+ """When shellify fails throw exception"""
+ cls.side_effect = TypeError("patched shellify")
+ valid_config = {"runcmd": ["echo 42"]}
+ cc = get_cloud(paths=self.paths)
+ with self.assertRaises(TypeError) as cm:
+ with self.allow_subp(["/bin/sh"]):
+ handle("cc_runcmd", valid_config, cc, LOG, None)
+ self.assertIn("Failed to shellify", str(cm.exception))
+
+ def test_handler_invalid_command_set(self):
+ """Commands which can't be converted to shell will raise errors."""
+ invalid_config = {"runcmd": 1}
+ cc = get_cloud(paths=self.paths)
+ with self.assertRaises(TypeError) as cm:
+ handle("cc_runcmd", invalid_config, cc, LOG, [])
+ self.assertIn(
+ "Failed to shellify 1 into file"
+ " /var/lib/cloud/instances/iid-datasource-none/scripts/runcmd",
+ str(cm.exception),
+ )
+
+ @skipUnlessJsonSchema()
+ def test_handler_schema_validation_warns_non_array_type(self):
+ """Schema validation warns of non-array type for runcmd key.
+
+ Schema validation is not strict, so runcmd attempts to shellify the
+ invalid content.
+ """
+ invalid_config = {"runcmd": 1}
+ cc = get_cloud(paths=self.paths)
+ with self.assertRaises(TypeError) as cm:
+ handle("cc_runcmd", invalid_config, cc, LOG, [])
+ self.assertIn(
+ "Invalid cloud-config provided:\nruncmd: 1 is not of type 'array'",
+ self.logs.getvalue(),
+ )
+ self.assertIn("Failed to shellify", str(cm.exception))
+
+ @skipUnlessJsonSchema()
+ def test_handler_schema_validation_warns_non_array_item_type(self):
+ """Schema validation warns of non-array or string runcmd items.
+
+ Schema validation is not strict, so runcmd attempts to shellify the
+ invalid content.
+ """
+ invalid_config = {
+ "runcmd": ["ls /", 20, ["wget", "http://stuff/blah"], {"a": "n"}]
+ }
+ cc = get_cloud(paths=self.paths)
+ with self.assertRaises(TypeError) as cm:
+ handle("cc_runcmd", invalid_config, cc, LOG, [])
+ expected_warnings = [
+ "runcmd.1: 20 is not valid under any of the given schemas",
+ "runcmd.3: {'a': 'n'} is not valid under any of the given schema",
+ ]
+ logs = self.logs.getvalue()
+ for warning in expected_warnings:
+ self.assertIn(warning, logs)
+ self.assertIn("Failed to shellify", str(cm.exception))
+
+ def test_handler_write_valid_runcmd_schema_to_file(self):
+ """Valid runcmd schema is written to a runcmd shell script."""
+ valid_config = {"runcmd": [["ls", "/"]]}
+ cc = get_cloud(paths=self.paths)
+ handle("cc_runcmd", valid_config, cc, LOG, [])
+ runcmd_file = os.path.join(
+ self.new_root,
+ "var/lib/cloud/instances/iid-datasource-none/scripts/runcmd",
+ )
+ self.assertEqual("#!/bin/sh\n'ls' '/'\n", util.load_file(runcmd_file))
+ file_stat = os.stat(runcmd_file)
+ self.assertEqual(0o700, stat.S_IMODE(file_stat.st_mode))
+
+
+@skipUnlessJsonSchema()
+class TestSchema(CiTestCase, SchemaTestCaseMixin):
+ """Directly test schema rather than through handle."""
+
+ schema = schema
+
+ def test_duplicates_are_fine_array_array(self):
+ """Duplicated commands array/array entries are allowed."""
+ self.assertSchemaValid(
+ [["echo", "bye"], ["echo", "bye"]],
+ "command entries can be duplicate.",
+ )
+
+ def test_duplicates_are_fine_array_string(self):
+ """Duplicated commands array/string entries are allowed."""
+ self.assertSchemaValid(
+ ["echo bye", "echo bye"], "command entries can be duplicate."
+ )
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_seed_random.py b/tests/unittests/config/test_cc_seed_random.py
new file mode 100644
index 00000000..8b2fdcdd
--- /dev/null
+++ b/tests/unittests/config/test_cc_seed_random.py
@@ -0,0 +1,221 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
+#
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+#
+# Based on test_handler_set_hostname.py
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+import gzip
+import logging
+import tempfile
+from io import BytesIO
+
+from cloudinit import subp, util
+from cloudinit.config import cc_seed_random
+from tests.unittests import helpers as t_help
+from tests.unittests.util import get_cloud
+
+LOG = logging.getLogger(__name__)
+
+
+class TestRandomSeed(t_help.TestCase):
+ def setUp(self):
+ super(TestRandomSeed, self).setUp()
+ self._seed_file = tempfile.mktemp()
+ self.unapply = []
+
+ # by default 'which' has nothing in its path
+ self.apply_patches([(subp, "which", self._which)])
+ self.apply_patches([(subp, "subp", self._subp)])
+ self.subp_called = []
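+ # whichdata maps program name -> path; _which consults it so tests
+ # can simulate which commands are available on the system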
+ self.whichdata = {}
+
+ def tearDown(self):
+ apply_patches([i for i in reversed(self.unapply)])
+ util.del_file(self._seed_file)
+
+ def apply_patches(self, patches):
+ ret = apply_patches(patches)
+ self.unapply += ret
+
+ def _which(self, program):
+ return self.whichdata.get(program)
+
+ def _subp(self, *args, **kwargs):
+ # supports subp calling with cmd as args or kwargs
+ if "args" not in kwargs:
+ kwargs["args"] = args[0]
+ self.subp_called.append(kwargs)
+ return
+
+ def _compress(self, text):
+ contents = BytesIO()
+ gz_fh = gzip.GzipFile(mode="wb", fileobj=contents)
+ gz_fh.write(text)
+ gz_fh.close()
+ return contents.getvalue()
+
+ def test_append_random(self):
+ cfg = {
+ "random_seed": {
+ "file": self._seed_file,
+ "data": "tiny-tim-was-here",
+ }
+ }
+ cc_seed_random.handle("test", cfg, get_cloud("ubuntu"), LOG, [])
+ contents = util.load_file(self._seed_file)
+ self.assertEqual("tiny-tim-was-here", contents)
+
+ def test_append_random_unknown_encoding(self):
+ data = self._compress(b"tiny-toe")
+ cfg = {
+ "random_seed": {
+ "file": self._seed_file,
+ "data": data,
+ "encoding": "special_encoding",
+ }
+ }
+ self.assertRaises(
+ IOError,
+ cc_seed_random.handle,
+ "test",
+ cfg,
+ get_cloud("ubuntu"),
+ LOG,
+ [],
+ )
+
+ def test_append_random_gzip(self):
+ data = self._compress(b"tiny-toe")
+ cfg = {
+ "random_seed": {
+ "file": self._seed_file,
+ "data": data,
+ "encoding": "gzip",
+ }
+ }
+ cc_seed_random.handle("test", cfg, get_cloud("ubuntu"), LOG, [])
+ contents = util.load_file(self._seed_file)
+ self.assertEqual("tiny-toe", contents)
+
+ def test_append_random_gz(self):
+ data = self._compress(b"big-toe")
+ cfg = {
+ "random_seed": {
+ "file": self._seed_file,
+ "data": data,
+ "encoding": "gz",
+ }
+ }
+ cc_seed_random.handle("test", cfg, get_cloud("ubuntu"), LOG, [])
+ contents = util.load_file(self._seed_file)
+ self.assertEqual("big-toe", contents)
+
+ def test_append_random_base64(self):
+ data = util.b64e("bubbles")
+ cfg = {
+ "random_seed": {
+ "file": self._seed_file,
+ "data": data,
+ "encoding": "base64",
+ }
+ }
+ cc_seed_random.handle("test", cfg, get_cloud("ubuntu"), LOG, [])
+ contents = util.load_file(self._seed_file)
+ self.assertEqual("bubbles", contents)
+
+ def test_append_random_b64(self):
+ data = util.b64e("kit-kat")
+ cfg = {
+ "random_seed": {
+ "file": self._seed_file,
+ "data": data,
+ "encoding": "b64",
+ }
+ }
+ cc_seed_random.handle("test", cfg, get_cloud("ubuntu"), LOG, [])
+ contents = util.load_file(self._seed_file)
+ self.assertEqual("kit-kat", contents)
+
+ def test_append_random_metadata(self):
+ cfg = {
+ "random_seed": {
+ "file": self._seed_file,
+ "data": "tiny-tim-was-here",
+ }
+ }
+ c = get_cloud("ubuntu", metadata={"random_seed": "-so-was-josh"})
+ cc_seed_random.handle("test", cfg, c, LOG, [])
+ contents = util.load_file(self._seed_file)
+ self.assertEqual("tiny-tim-was-here-so-was-josh", contents)
+
+ def test_seed_command_provided_and_available(self):
+ c = get_cloud("ubuntu")
+ self.whichdata = {"pollinate": "/usr/bin/pollinate"}
+ cfg = {"random_seed": {"command": ["pollinate", "-q"]}}
+ cc_seed_random.handle("test", cfg, c, LOG, [])
+
+ subp_args = [f["args"] for f in self.subp_called]
+ self.assertIn(["pollinate", "-q"], subp_args)
+
+ def test_seed_command_not_provided(self):
+ c = get_cloud("ubuntu")
+ self.whichdata = {}
+ cc_seed_random.handle("test", {}, c, LOG, [])
+
+ # subp should not have been called since 'which' reports no command available
+ self.assertFalse(self.subp_called)
+
+ def test_unavailable_seed_command_and_required_raises_error(self):
+ c = get_cloud("ubuntu")
+ self.whichdata = {}
+ cfg = {
+ "random_seed": {
+ "command": ["THIS_NO_COMMAND"],
+ "command_required": True,
+ }
+ }
+ self.assertRaises(
+ ValueError, cc_seed_random.handle, "test", cfg, c, LOG, []
+ )
+
+ def test_seed_command_and_required(self):
+ c = get_cloud("ubuntu")
+ self.whichdata = {"foo": "foo"}
+ cfg = {"random_seed": {"command_required": True, "command": ["foo"]}}
+ cc_seed_random.handle("test", cfg, c, LOG, [])
+
+ self.assertIn(["foo"], [f["args"] for f in self.subp_called])
+
+ def test_file_in_environment_for_command(self):
+ c = get_cloud("ubuntu")
+ self.whichdata = {"foo": "foo"}
+ cfg = {
+ "random_seed": {
+ "command_required": True,
+ "command": ["foo"],
+ "file": self._seed_file,
+ }
+ }
+ cc_seed_random.handle("test", cfg, c, LOG, [])
+
+ # this just insists that the first time subp was called,
+ # RANDOM_SEED_FILE was set correctly in its environment
+ subp_env = [f["env"] for f in self.subp_called]
+ self.assertEqual(subp_env[0].get("RANDOM_SEED_FILE"), self._seed_file)
+
+
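+# Monkey-patch each (ref, name, replace) tuple onto its target object and
+# return (ref, name, original) tuples so tearDown can restore the originals
+# by re-applying the reversed list.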
+def apply_patches(patches):
+ ret = []
+ for (ref, name, replace) in patches:
+ if replace is None:
+ continue
+ orig = getattr(ref, name)
+ setattr(ref, name, replace)
+ ret.append((ref, name, orig))
+ return ret
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_set_hostname.py b/tests/unittests/config/test_cc_set_hostname.py
new file mode 100644
index 00000000..fd994c4e
--- /dev/null
+++ b/tests/unittests/config/test_cc_set_hostname.py
@@ -0,0 +1,208 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import logging
+import os
+import shutil
+import tempfile
+from io import BytesIO
+from unittest import mock
+
+from configobj import ConfigObj
+
+from cloudinit import cloud, distros, helpers, util
+from cloudinit.config import cc_set_hostname
+from tests.unittests import helpers as t_help
+
+LOG = logging.getLogger(__name__)
+
+
+class TestHostname(t_help.FilesystemMockingTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestHostname, self).setUp()
+ self.tmp = tempfile.mkdtemp()
+ util.ensure_dir(os.path.join(self.tmp, "data"))
+ self.addCleanup(shutil.rmtree, self.tmp)
+
+ def _fetch_distro(self, kind, conf=None):
+ cls = distros.fetch(kind)
+ paths = helpers.Paths({"cloud_dir": self.tmp})
+ conf = {} if conf is None else conf
+ return cls(kind, conf, paths)
+
+ def test_debian_write_hostname_prefer_fqdn(self):
+ cfg = {
+ "hostname": "blah",
+ "prefer_fqdn_over_hostname": True,
+ "fqdn": "blah.yahoo.com",
+ }
+ distro = self._fetch_distro("debian", cfg)
+ paths = helpers.Paths({"cloud_dir": self.tmp})
+ ds = None
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ self.patchUtils(self.tmp)
+ cc_set_hostname.handle("cc_set_hostname", cfg, cc, LOG, [])
+ contents = util.load_file("/etc/hostname")
+ self.assertEqual("blah.yahoo.com", contents.strip())
+
+ @mock.patch("cloudinit.distros.Distro.uses_systemd", return_value=False)
+ def test_rhel_write_hostname_prefer_hostname(self, m_uses_systemd):
+ cfg = {
+ "hostname": "blah",
+ "prefer_fqdn_over_hostname": False,
+ "fqdn": "blah.yahoo.com",
+ }
+ distro = self._fetch_distro("rhel", cfg)
+ paths = helpers.Paths({"cloud_dir": self.tmp})
+ ds = None
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ self.patchUtils(self.tmp)
+ cc_set_hostname.handle("cc_set_hostname", cfg, cc, LOG, [])
+ contents = util.load_file("/etc/sysconfig/network", decode=False)
+ n_cfg = ConfigObj(BytesIO(contents))
+ self.assertEqual({"HOSTNAME": "blah"}, dict(n_cfg))
+
+ @mock.patch("cloudinit.distros.Distro.uses_systemd", return_value=False)
+ def test_write_hostname_rhel(self, m_uses_systemd):
+ cfg = {"hostname": "blah", "fqdn": "blah.blah.blah.yahoo.com"}
+ distro = self._fetch_distro("rhel")
+ paths = helpers.Paths({"cloud_dir": self.tmp})
+ ds = None
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ self.patchUtils(self.tmp)
+ cc_set_hostname.handle("cc_set_hostname", cfg, cc, LOG, [])
+ contents = util.load_file("/etc/sysconfig/network", decode=False)
+ n_cfg = ConfigObj(BytesIO(contents))
+ self.assertEqual({"HOSTNAME": "blah.blah.blah.yahoo.com"}, dict(n_cfg))
+
+ def test_write_hostname_debian(self):
+ cfg = {
+ "hostname": "blah",
+ "fqdn": "blah.blah.blah.yahoo.com",
+ }
+ distro = self._fetch_distro("debian")
+ paths = helpers.Paths({"cloud_dir": self.tmp})
+ ds = None
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ self.patchUtils(self.tmp)
+ cc_set_hostname.handle("cc_set_hostname", cfg, cc, LOG, [])
+ contents = util.load_file("/etc/hostname")
+ self.assertEqual("blah", contents.strip())
+
+ @mock.patch("cloudinit.distros.Distro.uses_systemd", return_value=False)
+ def test_write_hostname_sles(self, m_uses_systemd):
+ cfg = {
+ "hostname": "blah.blah.blah.suse.com",
+ }
+ distro = self._fetch_distro("sles")
+ paths = helpers.Paths({"cloud_dir": self.tmp})
+ ds = None
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ self.patchUtils(self.tmp)
+ cc_set_hostname.handle("cc_set_hostname", cfg, cc, LOG, [])
+ contents = util.load_file(distro.hostname_conf_fn)
+ self.assertEqual("blah", contents.strip())
+
+ @mock.patch("cloudinit.distros.photon.subp.subp")
+ def test_photon_hostname(self, m_subp):
+ cfg1 = {
+ "hostname": "photon",
+ "prefer_fqdn_over_hostname": True,
+ "fqdn": "test1.vmware.com",
+ }
+ cfg2 = {
+ "hostname": "photon",
+ "prefer_fqdn_over_hostname": False,
+ "fqdn": "test2.vmware.com",
+ }
+
+ ds = None
+ m_subp.return_value = (None, None)
+ distro = self._fetch_distro("photon", cfg1)
+ paths = helpers.Paths({"cloud_dir": self.tmp})
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ for c in [cfg1, cfg2]:
+ cc_set_hostname.handle("cc_set_hostname", c, cc, LOG, [])
+ print("\n", m_subp.call_args_list)
+ if c["prefer_fqdn_over_hostname"]:
+ assert [
+ mock.call(
+ ["hostnamectl", "set-hostname", c["fqdn"]],
+ capture=True,
+ )
+ ] in m_subp.call_args_list
+ assert [
+ mock.call(
+ ["hostnamectl", "set-hostname", c["hostname"]],
+ capture=True,
+ )
+ ] not in m_subp.call_args_list
+ else:
+ assert [
+ mock.call(
+ ["hostnamectl", "set-hostname", c["hostname"]],
+ capture=True,
+ )
+ ] in m_subp.call_args_list
+ assert [
+ mock.call(
+ ["hostnamectl", "set-hostname", c["fqdn"]],
+ capture=True,
+ )
+ ] not in m_subp.call_args_list
+
+ def test_multiple_calls_skips_unchanged_hostname(self):
+ """Only new hostname or fqdn values will generate a hostname call."""
+ distro = self._fetch_distro("debian")
+ paths = helpers.Paths({"cloud_dir": self.tmp})
+ ds = None
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ self.patchUtils(self.tmp)
+ cc_set_hostname.handle(
+ "cc_set_hostname", {"hostname": "hostname1.me.com"}, cc, LOG, []
+ )
+ contents = util.load_file("/etc/hostname")
+ self.assertEqual("hostname1", contents.strip())
+ cc_set_hostname.handle(
+ "cc_set_hostname", {"hostname": "hostname1.me.com"}, cc, LOG, []
+ )
+ self.assertIn(
+ "DEBUG: No hostname changes. Skipping set-hostname\n",
+ self.logs.getvalue(),
+ )
+ cc_set_hostname.handle(
+ "cc_set_hostname", {"hostname": "hostname2.me.com"}, cc, LOG, []
+ )
+ contents = util.load_file("/etc/hostname")
+ self.assertEqual("hostname2", contents.strip())
+ self.assertIn(
+ "Non-persistently setting the system hostname to hostname2",
+ self.logs.getvalue(),
+ )
+
+ def test_error_on_distro_set_hostname_errors(self):
+ """Raise SetHostnameError on exceptions from distro.set_hostname."""
+ distro = self._fetch_distro("debian")
+
+ def set_hostname_error(hostname, fqdn):
+ raise Exception("OOPS on: %s" % fqdn)
+
+ distro.set_hostname = set_hostname_error
+ paths = helpers.Paths({"cloud_dir": self.tmp})
+ ds = None
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ self.patchUtils(self.tmp)
+ with self.assertRaises(cc_set_hostname.SetHostnameError) as ctx_mgr:
+ cc_set_hostname.handle(
+ "somename", {"hostname": "hostname1.me.com"}, cc, LOG, []
+ )
+ self.assertEqual(
+ "Failed to set the hostname to hostname1.me.com (hostname1):"
+ " OOPS on: hostname1.me.com",
+ str(ctx_mgr.exception),
+ )
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_set_passwords.py b/tests/unittests/config/test_cc_set_passwords.py
new file mode 100644
index 00000000..bc81214b
--- /dev/null
+++ b/tests/unittests/config/test_cc_set_passwords.py
@@ -0,0 +1,177 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from unittest import mock
+
+from cloudinit import util
+from cloudinit.config import cc_set_passwords as setpass
+from tests.unittests.helpers import CiTestCase
+
+MODPATH = "cloudinit.config.cc_set_passwords."
+
+
+class TestHandleSshPwauth(CiTestCase):
+ """Test cc_set_passwords handling of ssh_pwauth in handle_ssh_pwauth."""
+
+ with_logs = True
+
+ @mock.patch("cloudinit.distros.subp.subp")
+ def test_unknown_value_logs_warning(self, m_subp):
+ cloud = self.tmp_cloud(distro="ubuntu")
+ setpass.handle_ssh_pwauth("floo", cloud.distro)
+ self.assertIn(
+ "Unrecognized value: ssh_pwauth=floo", self.logs.getvalue()
+ )
+ m_subp.assert_not_called()
+
+ @mock.patch(MODPATH + "update_ssh_config", return_value=True)
+ @mock.patch("cloudinit.distros.subp.subp")
+ def test_systemctl_as_service_cmd(self, m_subp, m_update_ssh_config):
+ """If systemctl in service cmd: systemctl restart name."""
+ cloud = self.tmp_cloud(distro="ubuntu")
+ cloud.distro.init_cmd = ["systemctl"]
+ setpass.handle_ssh_pwauth(True, cloud.distro)
+ m_subp.assert_called_with(
+ ["systemctl", "restart", "ssh"], capture=True
+ )
+
+ @mock.patch(MODPATH + "update_ssh_config", return_value=False)
+ @mock.patch("cloudinit.distros.subp.subp")
+ def test_not_restarted_if_not_updated(self, m_subp, m_update_ssh_config):
+ """If config is not updated, then no system restart should be done."""
+ cloud = self.tmp_cloud(distro="ubuntu")
+ setpass.handle_ssh_pwauth(True, cloud.distro)
+ m_subp.assert_not_called()
+ self.assertIn("No need to restart SSH", self.logs.getvalue())
+
+ @mock.patch(MODPATH + "update_ssh_config", return_value=True)
+ @mock.patch("cloudinit.distros.subp.subp")
+ def test_unchanged_does_nothing(self, m_subp, m_update_ssh_config):
+ """If 'unchanged', then no updates to config and no restart."""
+ cloud = self.tmp_cloud(distro="ubuntu")
+ setpass.handle_ssh_pwauth("unchanged", cloud.distro)
+ m_update_ssh_config.assert_not_called()
+ m_subp.assert_not_called()
+
+ @mock.patch("cloudinit.distros.subp.subp")
+ def test_valid_change_values(self, m_subp):
+ """If value is a valid changen value, then update should be called."""
+ cloud = self.tmp_cloud(distro="ubuntu")
+ upname = MODPATH + "update_ssh_config"
+ optname = "PasswordAuthentication"
+ for value in util.FALSE_STRINGS + util.TRUE_STRINGS:
+ optval = "yes" if value in util.TRUE_STRINGS else "no"
+ with mock.patch(upname, return_value=False) as m_update:
+ setpass.handle_ssh_pwauth(value, cloud.distro)
+ m_update.assert_called_with({optname: optval})
+ m_subp.assert_not_called()
+
+
+class TestSetPasswordsHandle(CiTestCase):
+ """Test cc_set_passwords.handle"""
+
+ with_logs = True
+
+ def test_handle_on_empty_config(self, *args):
+ """handle logs that no password has changed when config is empty."""
+ cloud = self.tmp_cloud(distro="ubuntu")
+ setpass.handle(
+ "IGNORED", cfg={}, cloud=cloud, log=self.logger, args=[]
+ )
+ self.assertEqual(
+ "DEBUG: Leaving SSH config 'PasswordAuthentication' unchanged. "
+ "ssh_pwauth=None\n",
+ self.logs.getvalue(),
+ )
+
+ def test_handle_on_chpasswd_list_parses_common_hashes(self):
+ """handle parses command password hashes."""
+ cloud = self.tmp_cloud(distro="ubuntu")
+ valid_hashed_pwds = [
+ "root:$2y$10$8BQjxjVByHA/Ee.O1bCXtO8S7Y5WojbXWqnqYpUW.BrPx/"
+ "Dlew1Va",
+ "ubuntu:$6$5hOurLPO$naywm3Ce0UlmZg9gG2Fl9acWCVEoakMMC7dR52q"
+ "SDexZbrN9z8yHxhUM2b.sxpguSwOlbOQSW/HpXazGGx3oo1",
+ ]
+ cfg = {"chpasswd": {"list": valid_hashed_pwds}}
+ with mock.patch.object(setpass, "chpasswd") as chpasswd:
+ setpass.handle(
+ "IGNORED", cfg=cfg, cloud=cloud, log=self.logger, args=[]
+ )
+ self.assertIn(
+ "DEBUG: Handling input for chpasswd as list.", self.logs.getvalue()
+ )
+ self.assertIn(
+ "DEBUG: Setting hashed password for ['root', 'ubuntu']",
+ self.logs.getvalue(),
+ )
+ valid = "\n".join(valid_hashed_pwds) + "\n"
+ called = chpasswd.call_args[0][1]
+ self.assertEqual(valid, called)
+
+ @mock.patch(MODPATH + "util.is_BSD")
+ @mock.patch(MODPATH + "subp.subp")
+ def test_bsd_calls_custom_pw_cmds_to_set_and_expire_passwords(
+ self, m_subp, m_is_bsd
+ ):
+ """BSD don't use chpasswd"""
+ m_is_bsd.return_value = True
+ cloud = self.tmp_cloud(distro="freebsd")
+ valid_pwds = ["ubuntu:passw0rd"]
+ cfg = {"chpasswd": {"list": valid_pwds}}
+ setpass.handle(
+ "IGNORED", cfg=cfg, cloud=cloud, log=self.logger, args=[]
+ )
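+ # On BSD the module shells out to 'pw usermod' instead of chpasswd:
+ # the password is fed on stdin via '-h 0' and then expired via '-p'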
+ self.assertEqual(
+ [
+ mock.call(
+ ["pw", "usermod", "ubuntu", "-h", "0"],
+ data="passw0rd",
+ logstring="chpasswd for ubuntu",
+ ),
+ mock.call(["pw", "usermod", "ubuntu", "-p", "01-Jan-1970"]),
+ ],
+ m_subp.call_args_list,
+ )
+
+ @mock.patch(MODPATH + "util.multi_log")
+ @mock.patch(MODPATH + "subp.subp")
+ def test_handle_on_chpasswd_list_creates_random_passwords(
+ self, m_subp, m_multi_log
+ ):
+ """handle parses command set random passwords."""
+ cloud = self.tmp_cloud(distro="ubuntu")
+ valid_random_pwds = ["root:R", "ubuntu:RANDOM"]
+ cfg = {"chpasswd": {"expire": "false", "list": valid_random_pwds}}
+ with mock.patch.object(setpass, "chpasswd") as chpasswd:
+ setpass.handle(
+ "IGNORED", cfg=cfg, cloud=cloud, log=self.logger, args=[]
+ )
+ self.assertIn(
+ "DEBUG: Handling input for chpasswd as list.", self.logs.getvalue()
+ )
+ self.assertEqual(1, chpasswd.call_count)
+ passwords, _ = chpasswd.call_args
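+ # chpasswd received newline-separated "user:password" lines; parse
+ # them back into a dict so the generated passwords can be checked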
+ user_pass = {
+ user: password
+ for user, password in (
+ line.split(":") for line in passwords[1].splitlines()
+ )
+ }
+
+ self.assertEqual(1, m_multi_log.call_count)
+ self.assertEqual(
+ mock.call(mock.ANY, stderr=False, fallback_to_stdout=False),
+ m_multi_log.call_args,
+ )
+
+ self.assertEqual(set(["root", "ubuntu"]), set(user_pass.keys()))
+ written_lines = m_multi_log.call_args[0][0].splitlines()
+ for password in user_pass.values():
+ for line in written_lines:
+ if password in line:
+ break
+ else:
+ self.fail("Password not emitted to console")
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_snap.py b/tests/unittests/config/test_cc_snap.py
new file mode 100644
index 00000000..1632676d
--- /dev/null
+++ b/tests/unittests/config/test_cc_snap.py
@@ -0,0 +1,640 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import re
+from io import StringIO
+
+from cloudinit import util
+from cloudinit.config.cc_snap import (
+ ASSERTIONS_FILE,
+ add_assertions,
+ handle,
+ maybe_install_squashfuse,
+ run_commands,
+ schema,
+)
+from cloudinit.config.schema import validate_cloudconfig_schema
+from tests.unittests.helpers import (
+ CiTestCase,
+ SchemaTestCaseMixin,
+ mock,
+ skipUnlessJsonSchema,
+ wrap_and_call,
+)
+
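+# Sample snapd assertion documents (a system-user and an account-key
+# assertion) used as fixtures by the add_assertions tests below.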
+SYSTEM_USER_ASSERTION = """\
+type: system-user
+authority-id: LqvZQdfyfGlYvtep4W6Oj6pFXP9t1Ksp
+brand-id: LqvZQdfyfGlYvtep4W6Oj6pFXP9t1Ksp
+email: foo@bar.com
+password: $6$E5YiAuMIPAwX58jG$miomhVNui/vf7f/3ctB/f0RWSKFxG0YXzrJ9rtJ1ikvzt
+series:
+- 16
+since: 2016-09-10T16:34:00+03:00
+until: 2017-11-10T16:34:00+03:00
+username: baz
+sign-key-sha3-384: RuVvnp4n52GilycjfbbTCI3_L8Y6QlIE75wxMc0KzGV3AUQqVd9GuXoj
+
+AcLBXAQAAQoABgUCV/UU1wAKCRBKnlMoJQLkZVeLD/9/+hIeVywtzsDA3oxl+P+u9D13y9s6svP
+Jd6Wnf4FTw6sq1GjBE4ZA7lrwSaRCUJ9Vcsvf2q9OGPY7mOb2TBxaDe0PbUMjrSrqllSSQwhpNI
+zG+NxkkKuxsUmLzFa+k9m6cyojNbw5LFhQZBQCGlr3JYqC0tIREq/UsZxj+90TUC87lDJwkU8GF
+s4CR+rejZj4itIcDcVxCSnJH6hv6j2JrJskJmvObqTnoOlcab+JXdamXqbldSP3UIhWoyVjqzkj
++to7mXgx+cCUA9+ngNCcfUG+1huGGTWXPCYkZ78HvErcRlIdeo4d3xwtz1cl/w3vYnq9og1XwsP
+Yfetr3boig2qs1Y+j/LpsfYBYncgWjeDfAB9ZZaqQz/oc8n87tIPZDJHrusTlBfop8CqcM4xsKS
+d+wnEY8e/F24mdSOYmS1vQCIDiRU3MKb6x138Ud6oHXFlRBbBJqMMctPqWDunWzb5QJ7YR0I39q
+BrnEqv5NE0G7w6HOJ1LSPG5Hae3P4T2ea+ATgkb03RPr3KnXnzXg4TtBbW1nytdlgoNc/BafE1H
+f3NThcq9gwX4xWZ2PAWnqVPYdDMyCtzW3Ck+o6sIzx+dh4gDLPHIi/6TPe/pUuMop9CBpWwez7V
+v1z+1+URx6Xlq3Jq18y5pZ6fY3IDJ6km2nQPMzcm4Q=="""
+
+ACCOUNT_ASSERTION = """\
+type: account-key
+authority-id: canonical
+revision: 2
+public-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0
+account-id: canonical
+name: store
+since: 2016-04-01T00:00:00.0Z
+body-length: 717
+sign-key-sha3-384: -CvQKAwRQ5h3Ffn10FILJoEZUXOv6km9FwA80-Rcj-f-6jadQ89VRswH
+
+AcbBTQRWhcGAARAA0KKYYQWuHOrsFVi4p4l7ZzSvX7kLgJFFeFgOkzdWKBTHEnsMKjl5mefFe9j
+qe8NlmJdfY7BenP7XeBtwKp700H/t9lLrZbpTNAPHXYxEWFJp5bPqIcJYBZ+29oLVLN1Tc5X482
+vCiDqL8+pPYqBrK2fNlyPlNNSum9wI70rDDL4r6FVvr+osTnGejibdV8JphWX+lrSQDnRSdM8KJ
+UM43vTgLGTi9W54oRhsA2OFexRfRksTrnqGoonCjqX5wO3OFSaMDzMsO2MJ/hPfLgDqw53qjzuK
+Iec9OL3k5basvu2cj5u9tKwVFDsCKK2GbKUsWWpx2KTpOifmhmiAbzkTHbH9KaoMS7p0kJwhTQG
+o9aJ9VMTWHJc/NCBx7eu451u6d46sBPCXS/OMUh2766fQmoRtO1OwCTxsRKG2kkjbMn54UdFULl
+VfzvyghMNRKIezsEkmM8wueTqGUGZWa6CEZqZKwhe/PROxOPYzqtDH18XZknbU1n5lNb7vNfem9
+2ai+3+JyFnW9UhfvpVF7gzAgdyCqNli4C6BIN43uwoS8HkykocZS/+Gv52aUQ/NZ8BKOHLw+7an
+Q0o8W9ltSLZbEMxFIPSN0stiZlkXAp6DLyvh1Y4wXSynDjUondTpej2fSvSlCz/W5v5V7qA4nIc
+vUvV7RjVzv17ut0AEQEAAQ==
+
+AcLDXAQAAQoABgUCV83k9QAKCRDUpVvql9g3IBT8IACKZ7XpiBZ3W4lqbPssY6On81WmxQLtvsM
+WTp6zZpl/wWOSt2vMNUk9pvcmrNq1jG9CuhDfWFLGXEjcrrmVkN3YuCOajMSPFCGrxsIBLSRt/b
+nrKykdLAAzMfG8rP1d82bjFFiIieE+urQ0Kcv09Jtdvavq3JT1Tek5mFyyfhHNlQEKOzWqmRWiL
+3c3VOZUs1ZD8TSlnuq/x+5T0X0YtOyGjSlVxk7UybbyMNd6MZfNaMpIG4x+mxD3KHFtBAC7O6kL
+eX3i6j5nCY5UABfA3DZEAkWP4zlmdBEOvZ9t293NaDdOpzsUHRkoi0Zez/9BHQ/kwx/uNc2WqrY
+inCmu16JGNeXqsyinnLl7Ghn2RwhvDMlLxF6RTx8xdx1yk6p3PBTwhZMUvuZGjUtN/AG8BmVJQ1
+rsGSRkkSywvnhVJRB2sudnrMBmNS2goJbzSbmJnOlBrd2WsV0T9SgNMWZBiov3LvU4o2SmAb6b+
+rYwh8H5QHcuuYJuxDjFhPswIp6Wes5T6hUicf3SWtObcDS4HSkVS4ImBjjX9YgCuFy7QdnooOWE
+aPvkRw3XCVeYq0K6w9GRsk1YFErD4XmXXZjDYY650MX9v42Sz5MmphHV8jdIY5ssbadwFSe2rCQ
+6UX08zy7RsIb19hTndE6ncvSNDChUR9eEnCm73eYaWTWTnq1cxdVP/s52r8uss++OYOkPWqh5nO
+haRn7INjH/yZX4qXjNXlTjo0PnHH0q08vNKDwLhxS+D9du+70FeacXFyLIbcWllSbJ7DmbumGpF
+yYbtj3FDDPzachFQdIG3lSt+cSUGeyfSs6wVtc3cIPka/2Urx7RprfmoWSI6+a5NcLdj0u2z8O9
+HxeIgxDpg/3gT8ZIuFKePMcLDM19Fh/p0ysCsX+84B9chNWtsMSmIaE57V+959MVtsLu7SLb9gi
+skrju0pQCwsu2wHMLTNd1f3PTHmrr49hxetTus07HSQUApMtAGKzQilF5zqFjbyaTd4xgQbd+PK
+CjFyzQTDOcUhXpuUGt/IzlqiFfsCsmbj2K4KdSNYMlqIgZ3Azu8KvZLIhsyN7v5vNIZSPfEbjde
+ClU9r0VRiJmtYBUjcSghD9LWn+yRLwOxhfQVjm0cBwIt5R/yPF/qC76yIVuWUtM5Y2/zJR1J8OF
+qWchvlImHtvDzS9FQeLyzJAOjvZ2CnWp2gILgUz0WQdOk1Dq8ax7KS9BQ42zxw9EZAEPw3PEFqR
+IQsRTONp+iVS8YxSmoYZjDlCgRMWUmawez/Fv5b9Fb/XkO5Eq4e+KfrpUujXItaipb+tV8h5v3t
+oG3Ie3WOHrVjCLXIdYslpL1O4nadqR6Xv58pHj6k"""
+
+
+class FakeCloud(object):
+ def __init__(self, distro):
+ self.distro = distro
+
+
+class TestAddAssertions(CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestAddAssertions, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ @mock.patch("cloudinit.config.cc_snap.subp.subp")
+ def test_add_assertions_on_empty_list(self, m_subp):
+ """When provided with an empty list, add_assertions does nothing."""
+ add_assertions([])
+ self.assertEqual("", self.logs.getvalue())
+ m_subp.assert_not_called()
+
+ def test_add_assertions_on_non_list_or_dict(self):
+ """When provided an invalid type, add_assertions raises an error."""
+ with self.assertRaises(TypeError) as context_manager:
+ add_assertions(assertions="I'm Not Valid")
+ self.assertEqual(
+ "assertion parameter was not a list or dict: I'm Not Valid",
+ str(context_manager.exception),
+ )
+
+ @mock.patch("cloudinit.config.cc_snap.subp.subp")
+ def test_add_assertions_adds_assertions_as_list(self, m_subp):
+ """When provided with a list, add_assertions adds all assertions."""
+ self.assertEqual(
+ ASSERTIONS_FILE, "/var/lib/cloud/instance/snapd.assertions"
+ )
+ assert_file = self.tmp_path("snapd.assertions", dir=self.tmp)
+ assertions = [SYSTEM_USER_ASSERTION, ACCOUNT_ASSERTION]
+ wrap_and_call(
+ "cloudinit.config.cc_snap",
+ {"ASSERTIONS_FILE": {"new": assert_file}},
+ add_assertions,
+ assertions,
+ )
+ self.assertIn(
+ "Importing user-provided snap assertions", self.logs.getvalue()
+ )
+ self.assertIn("sertions", self.logs.getvalue())
+ self.assertEqual(
+ [mock.call(["snap", "ack", assert_file], capture=True)],
+ m_subp.call_args_list,
+ )
+ compare_file = self.tmp_path("comparison", dir=self.tmp)
+ util.write_file(compare_file, "\n".join(assertions).encode("utf-8"))
+ self.assertEqual(
+ util.load_file(compare_file), util.load_file(assert_file)
+ )
+
+ @mock.patch("cloudinit.config.cc_snap.subp.subp")
+ def test_add_assertions_adds_assertions_as_dict(self, m_subp):
+ """When provided with a dict, add_assertions adds all assertions."""
+ self.assertEqual(
+ ASSERTIONS_FILE, "/var/lib/cloud/instance/snapd.assertions"
+ )
+ assert_file = self.tmp_path("snapd.assertions", dir=self.tmp)
+ assertions = {"00": SYSTEM_USER_ASSERTION, "01": ACCOUNT_ASSERTION}
+ wrap_and_call(
+ "cloudinit.config.cc_snap",
+ {"ASSERTIONS_FILE": {"new": assert_file}},
+ add_assertions,
+ assertions,
+ )
+ self.assertIn(
+ "Importing user-provided snap assertions", self.logs.getvalue()
+ )
+ self.assertIn(
+ "DEBUG: Snap acking: ['type: system-user', 'authority-id: Lqv",
+ self.logs.getvalue(),
+ )
+ self.assertIn(
+ "DEBUG: Snap acking: ['type: account-key', 'authority-id: canonic",
+ self.logs.getvalue(),
+ )
+ self.assertEqual(
+ [mock.call(["snap", "ack", assert_file], capture=True)],
+ m_subp.call_args_list,
+ )
+ compare_file = self.tmp_path("comparison", dir=self.tmp)
+ combined = "\n".join(assertions.values())
+ util.write_file(compare_file, combined.encode("utf-8"))
+ self.assertEqual(
+ util.load_file(compare_file), util.load_file(assert_file)
+ )
+
+
+class TestRunCommands(CiTestCase):
+
+ with_logs = True
+ allowed_subp = [CiTestCase.SUBP_SHELL_TRUE]
+
+ def setUp(self):
+ super(TestRunCommands, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ @mock.patch("cloudinit.config.cc_snap.subp.subp")
+ def test_run_commands_on_empty_list(self, m_subp):
+ """When provided with an empty list, run_commands does nothing."""
+ run_commands([])
+ self.assertEqual("", self.logs.getvalue())
+ m_subp.assert_not_called()
+
+ def test_run_commands_on_non_list_or_dict(self):
+ """When provided an invalid type, run_commands raises an error."""
+ with self.assertRaises(TypeError) as context_manager:
+ run_commands(commands="I'm Not Valid")
+ self.assertEqual(
+ "commands parameter was not a list or dict: I'm Not Valid",
+ str(context_manager.exception),
+ )
+
+ def test_run_command_logs_commands_and_exit_codes_to_stderr(self):
+ """All exit codes are logged to stderr."""
+ outfile = self.tmp_path("output.log", dir=self.tmp)
+
+ cmd1 = 'echo "HI" >> %s' % outfile
+ cmd2 = "bogus command"
+ cmd3 = 'echo "MOM" >> %s' % outfile
+ commands = [cmd1, cmd2, cmd3]
+
+ mock_path = "cloudinit.config.cc_snap.sys.stderr"
+ with mock.patch(mock_path, new_callable=StringIO) as m_stderr:
+ with self.assertRaises(RuntimeError) as context_manager:
+ run_commands(commands=commands)
+
+ self.assertIsNotNone(
+ re.search(
+ r"bogus: (command )?not found", str(context_manager.exception)
+ ),
+ msg="Expected bogus command not found",
+ )
+ expected_stderr_log = "\n".join(
+ [
+ "Begin run command: {cmd}".format(cmd=cmd1),
+ "End run command: exit(0)",
+ "Begin run command: {cmd}".format(cmd=cmd2),
+ "ERROR: End run command: exit(127)",
+ "Begin run command: {cmd}".format(cmd=cmd3),
+ "End run command: exit(0)\n",
+ ]
+ )
+ self.assertEqual(expected_stderr_log, m_stderr.getvalue())
+
+ def test_run_command_as_lists(self):
+ """When commands are specified as a list, run them in order."""
+ outfile = self.tmp_path("output.log", dir=self.tmp)
+
+ cmd1 = 'echo "HI" >> %s' % outfile
+ cmd2 = 'echo "MOM" >> %s' % outfile
+ commands = [cmd1, cmd2]
+ mock_path = "cloudinit.config.cc_snap.sys.stderr"
+ with mock.patch(mock_path, new_callable=StringIO):
+ run_commands(commands=commands)
+
+ self.assertIn(
+ "DEBUG: Running user-provided snap commands", self.logs.getvalue()
+ )
+ self.assertEqual("HI\nMOM\n", util.load_file(outfile))
+ self.assertIn(
+ "WARNING: Non-snap commands in snap config:", self.logs.getvalue()
+ )
+
+ def test_run_command_dict_sorted_as_command_script(self):
+ """When commands are a dict, sort them and run."""
+ outfile = self.tmp_path("output.log", dir=self.tmp)
+ cmd1 = 'echo "HI" >> %s' % outfile
+ cmd2 = 'echo "MOM" >> %s' % outfile
+ commands = {"02": cmd1, "01": cmd2}
+ mock_path = "cloudinit.config.cc_snap.sys.stderr"
+ with mock.patch(mock_path, new_callable=StringIO):
+ run_commands(commands=commands)
+
+ expected_messages = ["DEBUG: Running user-provided snap commands"]
+ for message in expected_messages:
+ self.assertIn(message, self.logs.getvalue())
+ self.assertEqual("MOM\nHI\n", util.load_file(outfile))
+
+
+@skipUnlessJsonSchema()
+class TestSchema(CiTestCase, SchemaTestCaseMixin):
+
+ with_logs = True
+ schema = schema
+
+ def test_schema_warns_on_snap_not_as_dict(self):
+ """If the snap configuration is not a dict, emit a warning."""
+ validate_cloudconfig_schema({"snap": "wrong type"}, schema)
+ self.assertEqual(
+ "WARNING: Invalid cloud-config provided:\nsnap: 'wrong type'"
+ " is not of type 'object'\n",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.config.cc_snap.run_commands")
+ def test_schema_disallows_unknown_keys(self, _):
+ """Unknown keys in the snap configuration emit warnings."""
+ validate_cloudconfig_schema(
+ {"snap": {"commands": ["ls"], "invalid-key": ""}}, schema
+ )
+ self.assertIn(
+ "WARNING: Invalid cloud-config provided:\nsnap: Additional"
+ " properties are not allowed ('invalid-key' was unexpected)",
+ self.logs.getvalue(),
+ )
+
+ def test_warn_schema_requires_either_commands_or_assertions(self):
+ """Warn when snap configuration lacks both commands and assertions."""
+ validate_cloudconfig_schema({"snap": {}}, schema)
+ self.assertIn(
+ "WARNING: Invalid cloud-config provided:\nsnap: {} does not"
+ " have enough properties",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.config.cc_snap.run_commands")
+ def test_warn_schema_commands_is_not_list_or_dict(self, _):
+ """Warn when snap:commands config is not a list or dict."""
+ validate_cloudconfig_schema({"snap": {"commands": "broken"}}, schema)
+ self.assertEqual(
+ "WARNING: Invalid cloud-config provided:\nsnap.commands: 'broken'"
+ " is not of type 'object', 'array'\n",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.config.cc_snap.run_commands")
+ def test_warn_schema_when_commands_is_empty(self, _):
+ """Emit warnings when snap:commands is an empty list or dict."""
+ validate_cloudconfig_schema({"snap": {"commands": []}}, schema)
+ validate_cloudconfig_schema({"snap": {"commands": {}}}, schema)
+ self.assertEqual(
+ "WARNING: Invalid cloud-config provided:\nsnap.commands: [] is"
+ " too short\nWARNING: Invalid cloud-config provided:\n"
+ "snap.commands: {} does not have enough properties\n",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.config.cc_snap.run_commands")
+ def test_schema_when_commands_are_list_or_dict(self, _):
+ """No warnings when snap:commands are either a list or dict."""
+ validate_cloudconfig_schema({"snap": {"commands": ["valid"]}}, schema)
+ validate_cloudconfig_schema(
+ {"snap": {"commands": {"01": "also valid"}}}, schema
+ )
+ self.assertEqual("", self.logs.getvalue())
+
+ @mock.patch("cloudinit.config.cc_snap.run_commands")
+ def test_schema_when_commands_values_are_invalid_type(self, _):
+ """Warnings when snap:commands values are invalid type (e.g. int)"""
+ validate_cloudconfig_schema({"snap": {"commands": [123]}}, schema)
+ validate_cloudconfig_schema(
+ {"snap": {"commands": {"01": 123}}}, schema
+ )
+ self.assertEqual(
+ "WARNING: Invalid cloud-config provided:\n"
+ "snap.commands.0: 123 is not valid under any of the given"
+ " schemas\n"
+ "WARNING: Invalid cloud-config provided:\n"
+ "snap.commands.01: 123 is not valid under any of the given"
+ " schemas\n",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.config.cc_snap.run_commands")
+ def test_schema_when_commands_list_values_are_invalid_type(self, _):
+ """Warnings when snap:commands list values are wrong type (e.g. int)"""
+ validate_cloudconfig_schema(
+ {"snap": {"commands": [["snap", "install", 123]]}}, schema
+ )
+ validate_cloudconfig_schema(
+ {"snap": {"commands": {"01": ["snap", "install", 123]}}}, schema
+ )
+ self.assertEqual(
+ "WARNING: Invalid cloud-config provided:\n"
+ "snap.commands.0: ['snap', 'install', 123] is not valid under any"
+ " of the given schemas\n"
+ "WARNING: Invalid cloud-config provided:\n"
+ "snap.commands.01: ['snap', 'install', 123] is not valid under any"
+ " of the given schemas\n",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.config.cc_snap.run_commands")
+ def test_schema_when_assertions_values_are_invalid_type(self, _):
+ """Warnings when snap:assertions values are invalid type (e.g. int)"""
+ validate_cloudconfig_schema({"snap": {"assertions": [123]}}, schema)
+ validate_cloudconfig_schema(
+ {"snap": {"assertions": {"01": 123}}}, schema
+ )
+ self.assertEqual(
+ "WARNING: Invalid cloud-config provided:\n"
+ "snap.assertions.0: 123 is not of type 'string'\n"
+ "WARNING: Invalid cloud-config provided:\n"
+ "snap.assertions.01: 123 is not of type 'string'\n",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.config.cc_snap.add_assertions")
+ def test_warn_schema_assertions_is_not_list_or_dict(self, _):
+ """Warn when snap:assertions config is not a list or dict."""
+ validate_cloudconfig_schema({"snap": {"assertions": "broken"}}, schema)
+ self.assertEqual(
+ "WARNING: Invalid cloud-config provided:\nsnap.assertions:"
+ " 'broken' is not of type 'object', 'array'\n",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.config.cc_snap.add_assertions")
+ def test_warn_schema_when_assertions_is_empty(self, _):
+ """Emit warnings when snap:assertions is an empty list or dict."""
+ validate_cloudconfig_schema({"snap": {"assertions": []}}, schema)
+ validate_cloudconfig_schema({"snap": {"assertions": {}}}, schema)
+ self.assertEqual(
+ "WARNING: Invalid cloud-config provided:\nsnap.assertions: []"
+ " is too short\n"
+ "WARNING: Invalid cloud-config provided:\nsnap.assertions: {}"
+ " does not have enough properties\n",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.config.cc_snap.add_assertions")
+ def test_schema_when_assertions_are_list_or_dict(self, _):
+ """No warnings when snap:assertions are a list or dict."""
+ validate_cloudconfig_schema(
+ {"snap": {"assertions": ["valid"]}}, schema
+ )
+ validate_cloudconfig_schema(
+ {"snap": {"assertions": {"01": "also valid"}}}, schema
+ )
+ self.assertEqual("", self.logs.getvalue())
+
+ def test_duplicates_are_fine_array_array(self):
+ """Duplicated commands array/array entries are allowed."""
+ self.assertSchemaValid(
+ {"commands": [["echo", "bye"], ["echo", "bye"]]},
+ "command entries can be duplicate.",
+ )
+
+ def test_duplicates_are_fine_array_string(self):
+ """Duplicated commands array/string entries are allowed."""
+ self.assertSchemaValid(
+ {"commands": ["echo bye", "echo bye"]},
+ "command entries can be duplicate.",
+ )
+
+ def test_duplicates_are_fine_dict_array(self):
+ """Duplicated commands dict/array entries are allowed."""
+ self.assertSchemaValid(
+ {"commands": {"00": ["echo", "bye"], "01": ["echo", "bye"]}},
+ "command entries can be duplicate.",
+ )
+
+ def test_duplicates_are_fine_dict_string(self):
+ """Duplicated commands dict/string entries are allowed."""
+ self.assertSchemaValid(
+ {"commands": {"00": "echo bye", "01": "echo bye"}},
+ "command entries can be duplicate.",
+ )
+
+
+class TestHandle(CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestHandle, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ @mock.patch("cloudinit.config.cc_snap.run_commands")
+ @mock.patch("cloudinit.config.cc_snap.add_assertions")
+ @mock.patch("cloudinit.config.cc_snap.validate_cloudconfig_schema")
+ def test_handle_no_config(self, m_schema, m_add, m_run):
+ """When no snap-related configuration is provided, nothing happens."""
+ cfg = {}
+ handle("snap", cfg=cfg, cloud=None, log=self.logger, args=None)
+ self.assertIn(
+ "DEBUG: Skipping module named snap, no 'snap' key in config",
+ self.logs.getvalue(),
+ )
+ m_schema.assert_not_called()
+ m_add.assert_not_called()
+ m_run.assert_not_called()
+
+ @mock.patch("cloudinit.config.cc_snap.run_commands")
+ @mock.patch("cloudinit.config.cc_snap.add_assertions")
+ @mock.patch("cloudinit.config.cc_snap.maybe_install_squashfuse")
+ def test_handle_skips_squashfuse_when_unconfigured(
+ self, m_squash, m_add, m_run
+ ):
+ """When squashfuse_in_container is unset, don't attempt to install."""
+ handle(
+ "snap", cfg={"snap": {}}, cloud=None, log=self.logger, args=None
+ )
+ handle(
+ "snap",
+ cfg={"snap": {"squashfuse_in_container": None}},
+ cloud=None,
+ log=self.logger,
+ args=None,
+ )
+ handle(
+ "snap",
+ cfg={"snap": {"squashfuse_in_container": False}},
+ cloud=None,
+ log=self.logger,
+ args=None,
+ )
+ self.assertEqual([], m_squash.call_args_list) # No calls
+        # Assertions and commands absent from snap config default to []
+ self.assertIn(mock.call([]), m_add.call_args_list)
+ self.assertIn(mock.call([]), m_run.call_args_list)
+
+ @mock.patch("cloudinit.config.cc_snap.maybe_install_squashfuse")
+ def test_handle_tries_to_install_squashfuse(self, m_squash):
+ """If squashfuse_in_container is True, try installing squashfuse."""
+ cfg = {"snap": {"squashfuse_in_container": True}}
+ mycloud = FakeCloud(None)
+ handle("snap", cfg=cfg, cloud=mycloud, log=self.logger, args=None)
+ self.assertEqual([mock.call(mycloud)], m_squash.call_args_list)
+
+ def test_handle_runs_commands_provided(self):
+ """If commands are specified as a list, run them."""
+ outfile = self.tmp_path("output.log", dir=self.tmp)
+
+ cfg = {
+ "snap": {
+ "commands": [
+ 'echo "HI" >> %s' % outfile,
+ 'echo "MOM" >> %s' % outfile,
+ ]
+ }
+ }
+ mock_path = "cloudinit.config.cc_snap.sys.stderr"
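+        # Capture anything cc_snap writes to stderr during the shell commands.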
+ with self.allow_subp([CiTestCase.SUBP_SHELL_TRUE]):
+ with mock.patch(mock_path, new_callable=StringIO):
+ handle("snap", cfg=cfg, cloud=None, log=self.logger, args=None)
+
+ self.assertEqual("HI\nMOM\n", util.load_file(outfile))
+
+ @mock.patch("cloudinit.config.cc_snap.subp.subp")
+ def test_handle_adds_assertions(self, m_subp):
+ """Any configured snap assertions are provided to add_assertions."""
+ assert_file = self.tmp_path("snapd.assertions", dir=self.tmp)
+ compare_file = self.tmp_path("comparison", dir=self.tmp)
+ cfg = {
+ "snap": {"assertions": [SYSTEM_USER_ASSERTION, ACCOUNT_ASSERTION]}
+ }
+ wrap_and_call(
+ "cloudinit.config.cc_snap",
+ {"ASSERTIONS_FILE": {"new": assert_file}},
+ handle,
+ "snap",
+ cfg=cfg,
+ cloud=None,
+ log=self.logger,
+ args=None,
+ )
+ content = "\n".join(cfg["snap"]["assertions"])
+ util.write_file(compare_file, content.encode("utf-8"))
+ self.assertEqual(
+ util.load_file(compare_file), util.load_file(assert_file)
+ )
+
+ @mock.patch("cloudinit.config.cc_snap.subp.subp")
+ @skipUnlessJsonSchema()
+ def test_handle_validates_schema(self, m_subp):
+        """Any provided config is run through validate_cloudconfig_schema."""
+ assert_file = self.tmp_path("snapd.assertions", dir=self.tmp)
+ cfg = {"snap": {"invalid": ""}} # Generates schema warning
+ wrap_and_call(
+ "cloudinit.config.cc_snap",
+ {"ASSERTIONS_FILE": {"new": assert_file}},
+ handle,
+ "snap",
+ cfg=cfg,
+ cloud=None,
+ log=self.logger,
+ args=None,
+ )
+ self.assertEqual(
+ "WARNING: Invalid cloud-config provided:\nsnap: Additional"
+ " properties are not allowed ('invalid' was unexpected)\n",
+ self.logs.getvalue(),
+ )
+
+
+class TestMaybeInstallSquashFuse(CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestMaybeInstallSquashFuse, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ @mock.patch("cloudinit.config.cc_snap.util.is_container")
+ def test_maybe_install_squashfuse_skips_non_containers(self, m_container):
+        """maybe_install_squashfuse does nothing when not in a container."""
+ m_container.return_value = False
+ maybe_install_squashfuse(cloud=FakeCloud(None))
+ self.assertEqual([mock.call()], m_container.call_args_list)
+ self.assertEqual("", self.logs.getvalue())
+
+ @mock.patch("cloudinit.config.cc_snap.util.is_container")
+    def test_maybe_install_squashfuse_raises_install_errors(self, m_container):
+        """maybe_install_squashfuse logs and raises package install errors."""
+        m_container.return_value = True
+        distro = mock.MagicMock()
+        distro.update_package_sources.return_value = None
+        distro.install_packages.side_effect = RuntimeError(
+            "Some install error"
+        )
+        with self.assertRaises(RuntimeError) as context_manager:
+            maybe_install_squashfuse(cloud=FakeCloud(distro))
+        self.assertEqual("Some install error", str(context_manager.exception))
+        self.assertIn(
+            "Failed to install squashfuse\nTraceback", self.logs.getvalue()
+        )
+
+ @mock.patch("cloudinit.config.cc_snap.util.is_container")
+ def test_maybe_install_squashfuse_raises_update_errors(self, m_container):
+ """maybe_install_squashfuse logs and raises package update errors."""
+ m_container.return_value = True
+ distro = mock.MagicMock()
+ distro.update_package_sources.side_effect = RuntimeError(
+ "Some apt error"
+ )
+ with self.assertRaises(RuntimeError) as context_manager:
+ maybe_install_squashfuse(cloud=FakeCloud(distro))
+ self.assertEqual("Some apt error", str(context_manager.exception))
+ self.assertIn("Package update failed\nTraceback", self.logs.getvalue())
+
+ @mock.patch("cloudinit.config.cc_snap.util.is_container")
+ def test_maybe_install_squashfuse_happy_path(self, m_container):
+        """maybe_install_squashfuse installs squashfuse in containers."""
+ m_container.return_value = True
+ distro = mock.MagicMock() # No errors raised
+ maybe_install_squashfuse(cloud=FakeCloud(distro))
+ self.assertEqual(
+ [mock.call()], distro.update_package_sources.call_args_list
+ )
+ self.assertEqual(
+ [mock.call(["squashfuse"])], distro.install_packages.call_args_list
+ )
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_spacewalk.py b/tests/unittests/config/test_cc_spacewalk.py
index 26f7648f..e1f42968 100644
--- a/tests/unittests/test_handler/test_handler_spacewalk.py
+++ b/tests/unittests/config/test_cc_spacewalk.py
@@ -1,21 +1,20 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.config import cc_spacewalk
-from cloudinit import subp
-
-from cloudinit.tests import helpers
-
import logging
from unittest import mock
+from cloudinit import subp
+from cloudinit.config import cc_spacewalk
+from tests.unittests import helpers
+
LOG = logging.getLogger(__name__)
class TestSpacewalk(helpers.TestCase):
space_cfg = {
- 'spacewalk': {
- 'server': 'localhost',
- 'profile_name': 'test',
+ "spacewalk": {
+ "server": "localhost",
+ "profile_name": "test",
}
}
@@ -31,12 +30,19 @@ class TestSpacewalk(helpers.TestCase):
@mock.patch("cloudinit.config.cc_spacewalk.subp.subp")
def test_do_register(self, mock_subp):
- cc_spacewalk.do_register(**self.space_cfg['spacewalk'])
- mock_subp.assert_called_with([
- 'rhnreg_ks',
- '--serverUrl', 'https://localhost/XMLRPC',
- '--profilename', 'test',
- '--sslCACert', cc_spacewalk.def_ca_cert_path,
- ], capture=False)
+ cc_spacewalk.do_register(**self.space_cfg["spacewalk"])
+ mock_subp.assert_called_with(
+ [
+ "rhnreg_ks",
+ "--serverUrl",
+ "https://localhost/XMLRPC",
+ "--profilename",
+ "test",
+ "--sslCACert",
+ cc_spacewalk.def_ca_cert_path,
+ ],
+ capture=False,
+ )
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_ssh.py b/tests/unittests/config/test_cc_ssh.py
new file mode 100644
index 00000000..d66cc4cb
--- /dev/null
+++ b/tests/unittests/config/test_cc_ssh.py
@@ -0,0 +1,467 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import logging
+import os.path
+
+from cloudinit import ssh_util
+from cloudinit.config import cc_ssh
+from tests.unittests.helpers import CiTestCase, mock
+
+LOG = logging.getLogger(__name__)
+
+MODPATH = "cloudinit.config.cc_ssh."
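+# Hostkey types expected to be published by default (DSA is excluded).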
+KEY_NAMES_NO_DSA = [
+    name for name in cc_ssh.GENERATE_KEY_NAMES if name != "dsa"
+]
+
+
+@mock.patch(MODPATH + "ssh_util.setup_user_keys")
+class TestHandleSsh(CiTestCase):
+ """Test cc_ssh handling of ssh config."""
+
+ def _publish_hostkey_test_setup(self):
+ self.test_hostkeys = {
+ "dsa": ("ssh-dss", "AAAAB3NzaC1kc3MAAACB"),
+ "ecdsa": ("ecdsa-sha2-nistp256", "AAAAE2VjZ"),
+ "ed25519": ("ssh-ed25519", "AAAAC3NzaC1lZDI"),
+ "rsa": ("ssh-rsa", "AAAAB3NzaC1yc2EAAA"),
+ }
+ self.test_hostkey_files = []
+ hostkey_tmpdir = self.tmp_dir()
+ for key_type in cc_ssh.GENERATE_KEY_NAMES:
+ key_data = self.test_hostkeys[key_type]
+ filename = "ssh_host_%s_key.pub" % key_type
+ filepath = os.path.join(hostkey_tmpdir, filename)
+ self.test_hostkey_files.append(filepath)
+ with open(filepath, "w") as f:
+ f.write(" ".join(key_data))
+
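+        # Point cc_ssh's module-level key path template at the temp dir so
+        # handle() finds the fake host keys written above.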
+ cc_ssh.KEY_FILE_TPL = os.path.join(hostkey_tmpdir, "ssh_host_%s_key")
+
+ def test_apply_credentials_with_user(self, m_setup_keys):
+ """Apply keys for the given user and root."""
+ keys = ["key1"]
+ user = "clouduser"
+ cc_ssh.apply_credentials(keys, user, False, ssh_util.DISABLE_USER_OPTS)
+ self.assertEqual(
+ [
+ mock.call(set(keys), user),
+ mock.call(set(keys), "root", options=""),
+ ],
+ m_setup_keys.call_args_list,
+ )
+
+ def test_apply_credentials_with_no_user(self, m_setup_keys):
+ """Apply keys for root only."""
+ keys = ["key1"]
+ user = None
+ cc_ssh.apply_credentials(keys, user, False, ssh_util.DISABLE_USER_OPTS)
+ self.assertEqual(
+ [mock.call(set(keys), "root", options="")],
+ m_setup_keys.call_args_list,
+ )
+
+ def test_apply_credentials_with_user_disable_root(self, m_setup_keys):
+ """Apply keys for the given user and disable root ssh."""
+ keys = ["key1"]
+ user = "clouduser"
+ options = ssh_util.DISABLE_USER_OPTS
+ cc_ssh.apply_credentials(keys, user, True, options)
+ options = options.replace("$USER", user)
+ options = options.replace("$DISABLE_USER", "root")
+ self.assertEqual(
+ [
+ mock.call(set(keys), user),
+ mock.call(set(keys), "root", options=options),
+ ],
+ m_setup_keys.call_args_list,
+ )
+
+ def test_apply_credentials_with_no_user_disable_root(self, m_setup_keys):
+ """Apply keys no user and disable root ssh."""
+ keys = ["key1"]
+ user = None
+ options = ssh_util.DISABLE_USER_OPTS
+ cc_ssh.apply_credentials(keys, user, True, options)
+ options = options.replace("$USER", "NONE")
+ options = options.replace("$DISABLE_USER", "root")
+ self.assertEqual(
+ [mock.call(set(keys), "root", options=options)],
+ m_setup_keys.call_args_list,
+ )
+
+ @mock.patch(MODPATH + "glob.glob")
+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+ @mock.patch(MODPATH + "os.path.exists")
+ def test_handle_no_cfg(self, m_path_exists, m_nug, m_glob, m_setup_keys):
+        """Test handle with no config skips generating existing keyfiles."""
+ cfg = {}
+ keys = ["key1"]
+ m_glob.return_value = [] # Return no matching keys to prevent removal
+        # Mock os.path.exists to True to short-circuit the key writing logic
+ m_path_exists.return_value = True
+ m_nug.return_value = ([], {})
+ cc_ssh.PUBLISH_HOST_KEYS = False
+ cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
+ cc_ssh.handle("name", cfg, cloud, LOG, None)
+ options = ssh_util.DISABLE_USER_OPTS.replace("$USER", "NONE")
+ options = options.replace("$DISABLE_USER", "root")
+ m_glob.assert_called_once_with("/etc/ssh/ssh_host_*key*")
+ self.assertIn(
+ [
+ mock.call("/etc/ssh/ssh_host_rsa_key"),
+ mock.call("/etc/ssh/ssh_host_dsa_key"),
+ mock.call("/etc/ssh/ssh_host_ecdsa_key"),
+ mock.call("/etc/ssh/ssh_host_ed25519_key"),
+ ],
+ m_path_exists.call_args_list,
+ )
+ self.assertEqual(
+ [mock.call(set(keys), "root", options=options)],
+ m_setup_keys.call_args_list,
+ )
+
+ @mock.patch(MODPATH + "glob.glob")
+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+ @mock.patch(MODPATH + "os.path.exists")
+ def test_dont_allow_public_ssh_keys(
+ self, m_path_exists, m_nug, m_glob, m_setup_keys
+ ):
+ """Test allow_public_ssh_keys=False ignores ssh public keys from
+ platform.
+ """
+ cfg = {"allow_public_ssh_keys": False}
+ keys = ["key1"]
+ user = "clouduser"
+ m_glob.return_value = [] # Return no matching keys to prevent removal
+        # Mock os.path.exists to True to short-circuit the key writing logic
+ m_path_exists.return_value = True
+ m_nug.return_value = ({user: {"default": user}}, {})
+ cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
+ cc_ssh.handle("name", cfg, cloud, LOG, None)
+
+ options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user)
+ options = options.replace("$DISABLE_USER", "root")
+ self.assertEqual(
+ [
+ mock.call(set(), user),
+ mock.call(set(), "root", options=options),
+ ],
+ m_setup_keys.call_args_list,
+ )
+
+ @mock.patch(MODPATH + "glob.glob")
+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+ @mock.patch(MODPATH + "os.path.exists")
+ def test_handle_no_cfg_and_default_root(
+ self, m_path_exists, m_nug, m_glob, m_setup_keys
+ ):
+ """Test handle with no config and a default distro user."""
+ cfg = {}
+ keys = ["key1"]
+ user = "clouduser"
+ m_glob.return_value = [] # Return no matching keys to prevent removal
+        # Mock os.path.exists to True to short-circuit the key writing logic
+ m_path_exists.return_value = True
+ m_nug.return_value = ({user: {"default": user}}, {})
+ cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
+ cc_ssh.handle("name", cfg, cloud, LOG, None)
+
+ options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user)
+ options = options.replace("$DISABLE_USER", "root")
+ self.assertEqual(
+ [
+ mock.call(set(keys), user),
+ mock.call(set(keys), "root", options=options),
+ ],
+ m_setup_keys.call_args_list,
+ )
+
+ @mock.patch(MODPATH + "glob.glob")
+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+ @mock.patch(MODPATH + "os.path.exists")
+ def test_handle_cfg_with_explicit_disable_root(
+ self, m_path_exists, m_nug, m_glob, m_setup_keys
+ ):
+ """Test handle with explicit disable_root and a default distro user."""
+ # This test is identical to test_handle_no_cfg_and_default_root,
+ # except this uses an explicit cfg value
+ cfg = {"disable_root": True}
+ keys = ["key1"]
+ user = "clouduser"
+ m_glob.return_value = [] # Return no matching keys to prevent removal
+        # Mock os.path.exists to True to short-circuit the key writing logic
+ m_path_exists.return_value = True
+ m_nug.return_value = ({user: {"default": user}}, {})
+ cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
+ cc_ssh.handle("name", cfg, cloud, LOG, None)
+
+ options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user)
+ options = options.replace("$DISABLE_USER", "root")
+ self.assertEqual(
+ [
+ mock.call(set(keys), user),
+ mock.call(set(keys), "root", options=options),
+ ],
+ m_setup_keys.call_args_list,
+ )
+
+ @mock.patch(MODPATH + "glob.glob")
+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+ @mock.patch(MODPATH + "os.path.exists")
+ def test_handle_cfg_without_disable_root(
+ self, m_path_exists, m_nug, m_glob, m_setup_keys
+ ):
+ """Test handle with disable_root == False."""
+ # When disable_root == False, the ssh redirect for root is skipped
+ cfg = {"disable_root": False}
+ keys = ["key1"]
+ user = "clouduser"
+ m_glob.return_value = [] # Return no matching keys to prevent removal
+        # Mock os.path.exists to True to short-circuit the key writing logic
+ m_path_exists.return_value = True
+ m_nug.return_value = ({user: {"default": user}}, {})
+ cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
+ cloud.get_public_ssh_keys = mock.Mock(return_value=keys)
+ cc_ssh.handle("name", cfg, cloud, LOG, None)
+
+ self.assertEqual(
+ [
+ mock.call(set(keys), user),
+ mock.call(set(keys), "root", options=""),
+ ],
+ m_setup_keys.call_args_list,
+ )
+
+ @mock.patch(MODPATH + "glob.glob")
+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+ @mock.patch(MODPATH + "os.path.exists")
+ def test_handle_publish_hostkeys_default(
+ self, m_path_exists, m_nug, m_glob, m_setup_keys
+ ):
+        """Hostkeys are published when the platform default allows it."""
+ self._publish_hostkey_test_setup()
+ cc_ssh.PUBLISH_HOST_KEYS = True
+ keys = ["key1"]
+ user = "clouduser"
+ # Return no matching keys for first glob, test keys for second.
+ m_glob.side_effect = iter(
+ [
+ [],
+ self.test_hostkey_files,
+ ]
+ )
+        # Mock os.path.exists to True to short-circuit the key writing logic
+ m_path_exists.return_value = True
+ m_nug.return_value = ({user: {"default": user}}, {})
+ cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
+ cloud.datasource.publish_host_keys = mock.Mock()
+
+ cfg = {}
+ expected_call = [
+ self.test_hostkeys[key_type] for key_type in KEY_NAMES_NO_DSA
+ ]
+ cc_ssh.handle("name", cfg, cloud, LOG, None)
+ self.assertEqual(
+ [mock.call(expected_call)],
+ cloud.datasource.publish_host_keys.call_args_list,
+ )
+
+ @mock.patch(MODPATH + "glob.glob")
+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+ @mock.patch(MODPATH + "os.path.exists")
+ def test_handle_publish_hostkeys_config_enable(
+ self, m_path_exists, m_nug, m_glob, m_setup_keys
+ ):
+        """Enabling hostkey publishing in config overrides platform default."""
+ self._publish_hostkey_test_setup()
+ cc_ssh.PUBLISH_HOST_KEYS = False
+ keys = ["key1"]
+ user = "clouduser"
+ # Return no matching keys for first glob, test keys for second.
+ m_glob.side_effect = iter(
+ [
+ [],
+ self.test_hostkey_files,
+ ]
+ )
+        # Mock os.path.exists to True to short-circuit the key writing logic
+ m_path_exists.return_value = True
+ m_nug.return_value = ({user: {"default": user}}, {})
+ cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
+ cloud.datasource.publish_host_keys = mock.Mock()
+
+ cfg = {"ssh_publish_hostkeys": {"enabled": True}}
+ expected_call = [
+ self.test_hostkeys[key_type] for key_type in KEY_NAMES_NO_DSA
+ ]
+ cc_ssh.handle("name", cfg, cloud, LOG, None)
+ self.assertEqual(
+ [mock.call(expected_call)],
+ cloud.datasource.publish_host_keys.call_args_list,
+ )
+
+ @mock.patch(MODPATH + "glob.glob")
+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+ @mock.patch(MODPATH + "os.path.exists")
+ def test_handle_publish_hostkeys_config_disable(
+ self, m_path_exists, m_nug, m_glob, m_setup_keys
+ ):
+        """Disabling ssh_publish_hostkeys in config prevents publishing."""
+ self._publish_hostkey_test_setup()
+ cc_ssh.PUBLISH_HOST_KEYS = True
+ keys = ["key1"]
+ user = "clouduser"
+ # Return no matching keys for first glob, test keys for second.
+ m_glob.side_effect = iter(
+ [
+ [],
+ self.test_hostkey_files,
+ ]
+ )
+        # Mock os.path.exists to True to short-circuit the key writing logic
+ m_path_exists.return_value = True
+ m_nug.return_value = ({user: {"default": user}}, {})
+ cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
+ cloud.datasource.publish_host_keys = mock.Mock()
+
+ cfg = {"ssh_publish_hostkeys": {"enabled": False}}
+ cc_ssh.handle("name", cfg, cloud, LOG, None)
+        cloud.datasource.publish_host_keys.assert_not_called()
+
+ @mock.patch(MODPATH + "glob.glob")
+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+ @mock.patch(MODPATH + "os.path.exists")
+ def test_handle_publish_hostkeys_config_blacklist(
+ self, m_path_exists, m_nug, m_glob, m_setup_keys
+ ):
+        """Blacklisted hostkey types are not published."""
+ self._publish_hostkey_test_setup()
+ cc_ssh.PUBLISH_HOST_KEYS = True
+ keys = ["key1"]
+ user = "clouduser"
+ # Return no matching keys for first glob, test keys for second.
+ m_glob.side_effect = iter(
+ [
+ [],
+ self.test_hostkey_files,
+ ]
+ )
+        # Mock os.path.exists to True to short-circuit the key writing logic
+ m_path_exists.return_value = True
+ m_nug.return_value = ({user: {"default": user}}, {})
+ cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
+ cloud.datasource.publish_host_keys = mock.Mock()
+
+ cfg = {
+ "ssh_publish_hostkeys": {
+ "enabled": True,
+ "blacklist": ["dsa", "rsa"],
+ }
+ }
+ expected_call = [
+ self.test_hostkeys[key_type] for key_type in ["ecdsa", "ed25519"]
+ ]
+ cc_ssh.handle("name", cfg, cloud, LOG, None)
+ self.assertEqual(
+ [mock.call(expected_call)],
+ cloud.datasource.publish_host_keys.call_args_list,
+ )
+
+ @mock.patch(MODPATH + "glob.glob")
+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+ @mock.patch(MODPATH + "os.path.exists")
+ def test_handle_publish_hostkeys_empty_blacklist(
+ self, m_path_exists, m_nug, m_glob, m_setup_keys
+ ):
+        """An empty blacklist publishes all generated hostkey types."""
+ self._publish_hostkey_test_setup()
+ cc_ssh.PUBLISH_HOST_KEYS = True
+ keys = ["key1"]
+ user = "clouduser"
+ # Return no matching keys for first glob, test keys for second.
+ m_glob.side_effect = iter(
+ [
+ [],
+ self.test_hostkey_files,
+ ]
+ )
+        # Mock os.path.exists to True to short-circuit the key writing logic
+ m_path_exists.return_value = True
+ m_nug.return_value = ({user: {"default": user}}, {})
+ cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
+ cloud.datasource.publish_host_keys = mock.Mock()
+
+ cfg = {"ssh_publish_hostkeys": {"enabled": True, "blacklist": []}}
+ expected_call = [
+ self.test_hostkeys[key_type]
+ for key_type in cc_ssh.GENERATE_KEY_NAMES
+ ]
+ cc_ssh.handle("name", cfg, cloud, LOG, None)
+ self.assertEqual(
+ [mock.call(expected_call)],
+ cloud.datasource.publish_host_keys.call_args_list,
+ )
+
+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+ @mock.patch(MODPATH + "util.write_file")
+ def test_handle_ssh_keys_in_cfg(self, m_write_file, m_nug, m_setup_keys):
+ """Test handle with ssh keys and certificate."""
+ # Populate a config dictionary to pass to handle() as well
+ # as the expected file-writing calls.
+ cfg = {"ssh_keys": {}}
+
+ expected_calls = []
+ for key_type in cc_ssh.GENERATE_KEY_NAMES:
+ private_name = "{}_private".format(key_type)
+ public_name = "{}_public".format(key_type)
+ cert_name = "{}_certificate".format(key_type)
+
+            # Actual key contents don't have to be realistic
+ private_value = "{}_PRIVATE_KEY".format(key_type)
+ public_value = "{}_PUBLIC_KEY".format(key_type)
+ cert_value = "{}_CERT_KEY".format(key_type)
+
+ cfg["ssh_keys"][private_name] = private_value
+ cfg["ssh_keys"][public_name] = public_value
+ cfg["ssh_keys"][cert_name] = cert_value
+
+ expected_calls.extend(
+ [
+ mock.call(
+ "/etc/ssh/ssh_host_{}_key".format(key_type),
+ private_value,
+ 384,
+ ),
+ mock.call(
+ "/etc/ssh/ssh_host_{}_key.pub".format(key_type),
+ public_value,
+ 384,
+ ),
+ mock.call(
+ "/etc/ssh/ssh_host_{}_key-cert.pub".format(key_type),
+ cert_value,
+ 384,
+ ),
+ mock.call(
+ "/etc/ssh/sshd_config",
+ "HostCertificate /etc/ssh/ssh_host_{}_key-cert.pub"
+ "\n".format(key_type),
+ preserve_mode=True,
+ ),
+ ]
+ )
+
+ # Run the handler.
+ m_nug.return_value = ([], {})
+ with mock.patch(
+ MODPATH + "ssh_util.parse_ssh_config", return_value=[]
+ ):
+ cc_ssh.handle(
+ "name", cfg, self.tmp_cloud(distro="ubuntu"), LOG, None
+ )
+
+ # Check that all expected output has been done.
+ for call_ in expected_calls:
+ self.assertIn(call_, m_write_file.call_args_list)
diff --git a/tests/unittests/test_handler/test_handler_timezone.py b/tests/unittests/config/test_cc_timezone.py
index 50c45363..f76397b7 100644
--- a/tests/unittests/test_handler/test_handler_timezone.py
+++ b/tests/unittests/config/test_cc_timezone.py
@@ -4,23 +4,18 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.config import cc_timezone
-
-from cloudinit import cloud
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import util
-
-from cloudinit.sources import DataSourceNoCloud
-
-from cloudinit.tests import helpers as t_help
-
-from configobj import ConfigObj
import logging
import shutil
import tempfile
from io import BytesIO
+from configobj import ConfigObj
+
+from cloudinit import util
+from cloudinit.config import cc_timezone
+from tests.unittests import helpers as t_help
+from tests.unittests.util import get_cloud
+
LOG = logging.getLogger(__name__)
@@ -29,38 +24,30 @@ class TestTimezone(t_help.FilesystemMockingTestCase):
super(TestTimezone, self).setUp()
self.new_root = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.new_root)
-
- def _get_cloud(self, distro):
self.patchUtils(self.new_root)
self.patchOS(self.new_root)
- paths = helpers.Paths({})
-
- cls = distros.fetch(distro)
- d = cls(distro, {}, paths)
- ds = DataSourceNoCloud.DataSourceNoCloud({}, d, paths)
- cc = cloud.Cloud(ds, paths, {}, d, None)
- return cc
-
def test_set_timezone_sles(self):
cfg = {
- 'timezone': 'Tatooine/Bestine',
+ "timezone": "Tatooine/Bestine",
}
- cc = self._get_cloud('sles')
+ cc = get_cloud("sles")
# Create a dummy timezone file
- dummy_contents = '0123456789abcdefgh'
- util.write_file('/usr/share/zoneinfo/%s' % cfg['timezone'],
- dummy_contents)
+ dummy_contents = "0123456789abcdefgh"
+ util.write_file(
+ "/usr/share/zoneinfo/%s" % cfg["timezone"], dummy_contents
+ )
- cc_timezone.handle('cc_timezone', cfg, cc, LOG, [])
+ cc_timezone.handle("cc_timezone", cfg, cc, LOG, [])
- contents = util.load_file('/etc/sysconfig/clock', decode=False)
+ contents = util.load_file("/etc/sysconfig/clock", decode=False)
n_cfg = ConfigObj(BytesIO(contents))
- self.assertEqual({'TIMEZONE': cfg['timezone']}, dict(n_cfg))
+ self.assertEqual({"TIMEZONE": cfg["timezone"]}, dict(n_cfg))
- contents = util.load_file('/etc/localtime')
+ contents = util.load_file("/etc/localtime")
self.assertEqual(dummy_contents, contents.strip())
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_ubuntu_advantage.py b/tests/unittests/config/test_cc_ubuntu_advantage.py
new file mode 100644
index 00000000..2037c5ed
--- /dev/null
+++ b/tests/unittests/config/test_cc_ubuntu_advantage.py
@@ -0,0 +1,391 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import subp
+from cloudinit.config.cc_ubuntu_advantage import (
+ configure_ua,
+ handle,
+ maybe_install_ua_tools,
+ schema,
+)
+from cloudinit.config.schema import validate_cloudconfig_schema
+from tests.unittests.helpers import (
+ CiTestCase,
+ SchemaTestCaseMixin,
+ mock,
+ skipUnlessJsonSchema,
+)
+
+# Module path used in mocks
+MPATH = "cloudinit.config.cc_ubuntu_advantage"
+
+
+class FakeCloud(object):
+ def __init__(self, distro):
+ self.distro = distro
+
+
+class TestConfigureUA(CiTestCase):
+
+ with_logs = True
+ allowed_subp = [CiTestCase.SUBP_SHELL_TRUE]
+
+ def setUp(self):
+ super(TestConfigureUA, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ @mock.patch("%s.subp.subp" % MPATH)
+ def test_configure_ua_attach_error(self, m_subp):
+ """Errors from ua attach command are raised."""
+ m_subp.side_effect = subp.ProcessExecutionError(
+ "Invalid token SomeToken"
+ )
+ with self.assertRaises(RuntimeError) as context_manager:
+ configure_ua(token="SomeToken")
+ self.assertEqual(
+ "Failure attaching Ubuntu Advantage:\nUnexpected error while"
+ " running command.\nCommand: -\nExit code: -\nReason: -\n"
+ "Stdout: Invalid token SomeToken\nStderr: -",
+ str(context_manager.exception),
+ )
+
+ @mock.patch("%s.subp.subp" % MPATH)
+ def test_configure_ua_attach_with_token(self, m_subp):
+ """When token is provided, attach the machine to ua using the token."""
+ configure_ua(token="SomeToken")
+ m_subp.assert_called_once_with(["ua", "attach", "SomeToken"])
+ self.assertEqual(
+ "DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("%s.subp.subp" % MPATH)
+ def test_configure_ua_attach_on_service_error(self, m_subp):
+        """Enable all services first, then raise any enable failures."""
+
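+        # Fake subp: fail the 'esm' and 'cc' enables, succeed otherwise.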
+ def fake_subp(cmd, capture=None):
+ fail_cmds = [
+ ["ua", "enable", "--assume-yes", svc] for svc in ["esm", "cc"]
+ ]
+ if cmd in fail_cmds and capture:
+ svc = cmd[-1]
+ raise subp.ProcessExecutionError(
+ "Invalid {} credentials".format(svc.upper())
+ )
+
+ m_subp.side_effect = fake_subp
+
+ with self.assertRaises(RuntimeError) as context_manager:
+ configure_ua(token="SomeToken", enable=["esm", "cc", "fips"])
+ self.assertEqual(
+ m_subp.call_args_list,
+ [
+ mock.call(["ua", "attach", "SomeToken"]),
+ mock.call(
+ ["ua", "enable", "--assume-yes", "esm"], capture=True
+ ),
+ mock.call(
+ ["ua", "enable", "--assume-yes", "cc"], capture=True
+ ),
+ mock.call(
+ ["ua", "enable", "--assume-yes", "fips"], capture=True
+ ),
+ ],
+ )
+ self.assertIn(
+ 'WARNING: Failure enabling "esm":\nUnexpected error'
+ " while running command.\nCommand: -\nExit code: -\nReason: -\n"
+ "Stdout: Invalid ESM credentials\nStderr: -\n",
+ self.logs.getvalue(),
+ )
+ self.assertIn(
+ 'WARNING: Failure enabling "cc":\nUnexpected error'
+ " while running command.\nCommand: -\nExit code: -\nReason: -\n"
+ "Stdout: Invalid CC credentials\nStderr: -\n",
+ self.logs.getvalue(),
+ )
+ self.assertEqual(
+ 'Failure enabling Ubuntu Advantage service(s): "esm", "cc"',
+ str(context_manager.exception),
+ )
+
+ @mock.patch("%s.subp.subp" % MPATH)
+ def test_configure_ua_attach_with_empty_services(self, m_subp):
+        """When services is an empty list, attach but enable no services."""
+ configure_ua(token="SomeToken", enable=[])
+ m_subp.assert_called_once_with(["ua", "attach", "SomeToken"])
+ self.assertEqual(
+ "DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("%s.subp.subp" % MPATH)
+ def test_configure_ua_attach_with_specific_services(self, m_subp):
+        """When services is a list, only enable the specified services."""
+ configure_ua(token="SomeToken", enable=["fips"])
+ self.assertEqual(
+ m_subp.call_args_list,
+ [
+ mock.call(["ua", "attach", "SomeToken"]),
+ mock.call(
+ ["ua", "enable", "--assume-yes", "fips"], capture=True
+ ),
+ ],
+ )
+ self.assertEqual(
+ "DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("%s.maybe_install_ua_tools" % MPATH, mock.MagicMock())
+ @mock.patch("%s.subp.subp" % MPATH)
+ def test_configure_ua_attach_with_string_services(self, m_subp):
+        """When services is a string, treat as a singleton list and warn."""
+ configure_ua(token="SomeToken", enable="fips")
+ self.assertEqual(
+ m_subp.call_args_list,
+ [
+ mock.call(["ua", "attach", "SomeToken"]),
+ mock.call(
+ ["ua", "enable", "--assume-yes", "fips"], capture=True
+ ),
+ ],
+ )
+ self.assertEqual(
+ "WARNING: ubuntu_advantage: enable should be a list, not a"
+ " string; treating as a single enable\n"
+ "DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("%s.subp.subp" % MPATH)
+ def test_configure_ua_attach_with_weird_services(self, m_subp):
+        """When services is neither a string nor a list, warn but attach."""
+ configure_ua(token="SomeToken", enable={"deffo": "wont work"})
+ self.assertEqual(
+ m_subp.call_args_list, [mock.call(["ua", "attach", "SomeToken"])]
+ )
+ self.assertEqual(
+ "WARNING: ubuntu_advantage: enable should be a list, not a"
+ " dict; skipping enabling services\n"
+ "DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n",
+ self.logs.getvalue(),
+ )
+
+
+@skipUnlessJsonSchema()
+class TestSchema(CiTestCase, SchemaTestCaseMixin):
+
+ with_logs = True
+ schema = schema
+
+ @mock.patch("%s.maybe_install_ua_tools" % MPATH)
+ @mock.patch("%s.configure_ua" % MPATH)
+ def test_schema_warns_on_ubuntu_advantage_not_dict(self, _cfg, _):
+ """If ubuntu_advantage configuration is not a dict, emit a warning."""
+ validate_cloudconfig_schema({"ubuntu_advantage": "wrong type"}, schema)
+ self.assertEqual(
+ "WARNING: Invalid cloud-config provided:\nubuntu_advantage:"
+ " 'wrong type' is not of type 'object'\n",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("%s.maybe_install_ua_tools" % MPATH)
+ @mock.patch("%s.configure_ua" % MPATH)
+ def test_schema_disallows_unknown_keys(self, _cfg, _):
+ """Unknown keys in ubuntu_advantage configuration emit warnings."""
+ validate_cloudconfig_schema(
+ {"ubuntu_advantage": {"token": "winner", "invalid-key": ""}},
+ schema,
+ )
+ self.assertIn(
+ "WARNING: Invalid cloud-config provided:\nubuntu_advantage:"
+ " Additional properties are not allowed ('invalid-key' was"
+ " unexpected)",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("%s.maybe_install_ua_tools" % MPATH)
+ @mock.patch("%s.configure_ua" % MPATH)
+ def test_warn_schema_requires_token(self, _cfg, _):
+ """Warn if ubuntu_advantage configuration lacks token."""
+ validate_cloudconfig_schema(
+ {"ubuntu_advantage": {"enable": ["esm"]}}, schema
+ )
+ self.assertEqual(
+ "WARNING: Invalid cloud-config provided:\nubuntu_advantage:"
+ " 'token' is a required property\n",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("%s.maybe_install_ua_tools" % MPATH)
+ @mock.patch("%s.configure_ua" % MPATH)
+ def test_warn_schema_services_is_not_list_or_dict(self, _cfg, _):
+ """Warn when ubuntu_advantage:enable config is not a list."""
+ validate_cloudconfig_schema(
+ {"ubuntu_advantage": {"enable": "needslist"}}, schema
+ )
+ self.assertEqual(
+ "WARNING: Invalid cloud-config provided:\nubuntu_advantage:"
+ " 'token' is a required property\nubuntu_advantage.enable:"
+ " 'needslist' is not of type 'array'\n",
+ self.logs.getvalue(),
+ )
+
+
+class TestHandle(CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestHandle, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ @mock.patch("%s.validate_cloudconfig_schema" % MPATH)
+ def test_handle_no_config(self, m_schema):
+ """When no ua-related configuration is provided, nothing happens."""
+ cfg = {}
+ handle("ua-test", cfg=cfg, cloud=None, log=self.logger, args=None)
+ self.assertIn(
+ "DEBUG: Skipping module named ua-test, no 'ubuntu_advantage'"
+ " configuration found",
+ self.logs.getvalue(),
+ )
+ m_schema.assert_not_called()
+
+ @mock.patch("%s.configure_ua" % MPATH)
+ @mock.patch("%s.maybe_install_ua_tools" % MPATH)
+ def test_handle_tries_to_install_ubuntu_advantage_tools(
+ self, m_install, m_cfg
+ ):
+ """If ubuntu_advantage is provided, try installing ua-tools package."""
+ cfg = {"ubuntu_advantage": {"token": "valid"}}
+ mycloud = FakeCloud(None)
+ handle("nomatter", cfg=cfg, cloud=mycloud, log=self.logger, args=None)
+ m_install.assert_called_once_with(mycloud)
+
+ @mock.patch("%s.configure_ua" % MPATH)
+ @mock.patch("%s.maybe_install_ua_tools" % MPATH)
+ def test_handle_passes_credentials_and_services_to_configure_ua(
+ self, m_install, m_configure_ua
+ ):
+ """All ubuntu_advantage config keys are passed to configure_ua."""
+ cfg = {"ubuntu_advantage": {"token": "token", "enable": ["esm"]}}
+ handle("nomatter", cfg=cfg, cloud=None, log=self.logger, args=None)
+ m_configure_ua.assert_called_once_with(token="token", enable=["esm"])
+
+ @mock.patch("%s.maybe_install_ua_tools" % MPATH, mock.MagicMock())
+ @mock.patch("%s.configure_ua" % MPATH)
+ def test_handle_warns_on_deprecated_ubuntu_advantage_key_w_config(
+ self, m_configure_ua
+ ):
+ """Warning when ubuntu-advantage key is present with new config"""
+ cfg = {"ubuntu-advantage": {"token": "token", "enable": ["esm"]}}
+ handle("nomatter", cfg=cfg, cloud=None, log=self.logger, args=None)
+ self.assertEqual(
+ 'WARNING: Deprecated configuration key "ubuntu-advantage"'
+ ' provided. Expected underscore delimited "ubuntu_advantage";'
+ " will attempt to continue.",
+ self.logs.getvalue().splitlines()[0],
+ )
+ m_configure_ua.assert_called_once_with(token="token", enable=["esm"])
+
+ def test_handle_error_on_deprecated_commands_key_dashed(self):
+ """Error when commands is present in ubuntu-advantage key."""
+ cfg = {"ubuntu-advantage": {"commands": "nogo"}}
+ with self.assertRaises(RuntimeError) as context_manager:
+ handle("nomatter", cfg=cfg, cloud=None, log=self.logger, args=None)
+ self.assertEqual(
+ 'Deprecated configuration "ubuntu-advantage: commands" provided.'
+ ' Expected "token"',
+ str(context_manager.exception),
+ )
+
+ def test_handle_error_on_deprecated_commands_key_underscored(self):
+ """Error when commands is present in ubuntu_advantage key."""
+ cfg = {"ubuntu_advantage": {"commands": "nogo"}}
+ with self.assertRaises(RuntimeError) as context_manager:
+ handle("nomatter", cfg=cfg, cloud=None, log=self.logger, args=None)
+ self.assertEqual(
+ 'Deprecated configuration "ubuntu-advantage: commands" provided.'
+ ' Expected "token"',
+ str(context_manager.exception),
+ )
+
+ @mock.patch("%s.maybe_install_ua_tools" % MPATH, mock.MagicMock())
+ @mock.patch("%s.configure_ua" % MPATH)
+ def test_handle_prefers_new_style_config(self, m_configure_ua):
+ """ubuntu_advantage should be preferred over ubuntu-advantage"""
+ cfg = {
+ "ubuntu-advantage": {"token": "nope", "enable": ["wrong"]},
+ "ubuntu_advantage": {"token": "token", "enable": ["esm"]},
+ }
+ handle("nomatter", cfg=cfg, cloud=None, log=self.logger, args=None)
+ self.assertEqual(
+ 'WARNING: Deprecated configuration key "ubuntu-advantage"'
+ ' provided. Expected underscore delimited "ubuntu_advantage";'
+ " will attempt to continue.",
+ self.logs.getvalue().splitlines()[0],
+ )
+ m_configure_ua.assert_called_once_with(token="token", enable=["esm"])
+
+
+class TestMaybeInstallUATools(CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestMaybeInstallUATools, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ @mock.patch("%s.subp.which" % MPATH)
+ def test_maybe_install_ua_tools_noop_when_ua_tools_present(self, m_which):
+ """Do nothing if ubuntu-advantage-tools already exists."""
+ m_which.return_value = "/usr/bin/ua" # already installed
+ distro = mock.MagicMock()
+ distro.update_package_sources.side_effect = RuntimeError(
+ "Some apt error"
+ )
+ maybe_install_ua_tools(cloud=FakeCloud(distro)) # No RuntimeError
+
+ @mock.patch("%s.subp.which" % MPATH)
+ def test_maybe_install_ua_tools_raises_update_errors(self, m_which):
+ """maybe_install_ua_tools logs and raises apt update errors."""
+ m_which.return_value = None
+ distro = mock.MagicMock()
+ distro.update_package_sources.side_effect = RuntimeError(
+ "Some apt error"
+ )
+ with self.assertRaises(RuntimeError) as context_manager:
+ maybe_install_ua_tools(cloud=FakeCloud(distro))
+ self.assertEqual("Some apt error", str(context_manager.exception))
+ self.assertIn("Package update failed\nTraceback", self.logs.getvalue())
+
+ @mock.patch("%s.subp.which" % MPATH)
+ def test_maybe_install_ua_raises_install_errors(self, m_which):
+ """maybe_install_ua_tools logs and raises package install errors."""
+ m_which.return_value = None
+ distro = mock.MagicMock()
+ distro.update_package_sources.return_value = None
+ distro.install_packages.side_effect = RuntimeError(
+ "Some install error"
+ )
+ with self.assertRaises(RuntimeError) as context_manager:
+ maybe_install_ua_tools(cloud=FakeCloud(distro))
+ self.assertEqual("Some install error", str(context_manager.exception))
+ self.assertIn(
+ "Failed to install ubuntu-advantage-tools\n", self.logs.getvalue()
+ )
+
+ @mock.patch("%s.subp.which" % MPATH)
+ def test_maybe_install_ua_tools_happy_path(self, m_which):
+ """maybe_install_ua_tools installs ubuntu-advantage-tools."""
+ m_which.return_value = None
+ distro = mock.MagicMock() # No errors raised
+ maybe_install_ua_tools(cloud=FakeCloud(distro))
+ distro.update_package_sources.assert_called_once_with()
+ distro.install_packages.assert_called_once_with(
+ ["ubuntu-advantage-tools"]
+ )
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_ubuntu_drivers.py b/tests/unittests/config/test_cc_ubuntu_drivers.py
new file mode 100644
index 00000000..4987492d
--- /dev/null
+++ b/tests/unittests/config/test_cc_ubuntu_drivers.py
@@ -0,0 +1,293 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import copy
+import os
+
+from cloudinit.config import cc_ubuntu_drivers as drivers
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ validate_cloudconfig_schema,
+)
+from cloudinit.subp import ProcessExecutionError
+from tests.unittests.helpers import CiTestCase, mock, skipUnlessJsonSchema
+
+MPATH = "cloudinit.config.cc_ubuntu_drivers."
+M_TMP_PATH = MPATH + "temp_utils.mkdtemp"
+OLD_UBUNTU_DRIVERS_ERROR_STDERR = (
+ "ubuntu-drivers: error: argument <command>: invalid choice: 'install' "
+ "(choose from 'list', 'autoinstall', 'devices', 'debug')\n"
+)
+
+
+# The tests in this module call helper methods which are decorated with
+# mock.patch. pylint doesn't understand that mock.patch passes parameters to
+# the decorated function, so it incorrectly reports that we aren't passing
+# values for all parameters. Instead of annotating every single call, we
+# disable it for the entire module:
+# pylint: disable=no-value-for-parameter
+
+
+class AnyTempScriptAndDebconfFile(object):
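+    """Equality matcher for a [temp script path, debconf file] command."""
+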
+ def __init__(self, tmp_dir, debconf_file):
+ self.tmp_dir = tmp_dir
+ self.debconf_file = debconf_file
+
+ def __eq__(self, cmd):
+ if not len(cmd) == 2:
+ return False
+ script, debconf_file = cmd
+        if script.startswith(self.tmp_dir) and script.endswith(".sh"):
+ return debconf_file == self.debconf_file
+ return False
+
+
+class TestUbuntuDrivers(CiTestCase):
+ cfg_accepted = {"drivers": {"nvidia": {"license-accepted": True}}}
+ install_gpgpu = ["ubuntu-drivers", "install", "--gpgpu", "nvidia"]
+
+ with_logs = True
+
+ @skipUnlessJsonSchema()
+ def test_schema_requires_boolean_for_license_accepted(self):
+ with self.assertRaisesRegex(
+ SchemaValidationError, ".*license-accepted.*TRUE.*boolean"
+ ):
+ validate_cloudconfig_schema(
+ {"drivers": {"nvidia": {"license-accepted": "TRUE"}}},
+ schema=drivers.schema,
+ strict=True,
+ )
+
+ @mock.patch(M_TMP_PATH)
+ @mock.patch(MPATH + "subp.subp", return_value=("", ""))
+ @mock.patch(MPATH + "subp.which", return_value=False)
+ def _assert_happy_path_taken(self, config, m_which, m_subp, m_tmp):
+ """Positive path test through handle. Package should be installed."""
+ tdir = self.tmp_dir()
+ debconf_file = os.path.join(tdir, "nvidia.template")
+ m_tmp.return_value = tdir
+ myCloud = mock.MagicMock()
+ drivers.handle("ubuntu_drivers", config, myCloud, None, None)
+ self.assertEqual(
+ [mock.call(["ubuntu-drivers-common"])],
+ myCloud.distro.install_packages.call_args_list,
+ )
+ self.assertEqual(
+ [
+ mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
+ mock.call(self.install_gpgpu),
+ ],
+ m_subp.call_args_list,
+ )
+
+ def test_handle_does_package_install(self):
+ self._assert_happy_path_taken(self.cfg_accepted)
+
+ def test_trueish_strings_are_considered_approval(self):
+ for true_value in ["yes", "true", "on", "1"]:
+ new_config = copy.deepcopy(self.cfg_accepted)
+ new_config["drivers"]["nvidia"]["license-accepted"] = true_value
+ self._assert_happy_path_taken(new_config)
+
+ @mock.patch(M_TMP_PATH)
+ @mock.patch(MPATH + "subp.subp")
+ @mock.patch(MPATH + "subp.which", return_value=False)
+ def test_handle_raises_error_if_no_drivers_found(
+ self, m_which, m_subp, m_tmp
+ ):
+ """If ubuntu-drivers doesn't install any drivers, raise an error."""
+ tdir = self.tmp_dir()
+ debconf_file = os.path.join(tdir, "nvidia.template")
+ m_tmp.return_value = tdir
+ myCloud = mock.MagicMock()
+
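+        # Fake subp: the temp debconf script succeeds, ubuntu-drivers fails.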
+ def fake_subp(cmd):
+ if cmd[0].startswith(tdir):
+ return
+ raise ProcessExecutionError(
+ stdout="No drivers found for installation.\n", exit_code=1
+ )
+
+ m_subp.side_effect = fake_subp
+
+ with self.assertRaises(Exception):
+ drivers.handle(
+ "ubuntu_drivers", self.cfg_accepted, myCloud, None, None
+ )
+ self.assertEqual(
+ [mock.call(["ubuntu-drivers-common"])],
+ myCloud.distro.install_packages.call_args_list,
+ )
+ self.assertEqual(
+ [
+ mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
+ mock.call(self.install_gpgpu),
+ ],
+ m_subp.call_args_list,
+ )
+ self.assertIn(
+ "ubuntu-drivers found no drivers for installation",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch(MPATH + "subp.subp", return_value=("", ""))
+ @mock.patch(MPATH + "subp.which", return_value=False)
+ def _assert_inert_with_config(self, config, m_which, m_subp):
+ """Helper to reduce repetition when testing negative cases"""
+ myCloud = mock.MagicMock()
+ drivers.handle("ubuntu_drivers", config, myCloud, None, None)
+ self.assertEqual(0, myCloud.distro.install_packages.call_count)
+ self.assertEqual(0, m_subp.call_count)
+
+ def test_handle_inert_if_license_not_accepted(self):
+ """Ensure we don't do anything if the license is rejected."""
+ self._assert_inert_with_config(
+ {"drivers": {"nvidia": {"license-accepted": False}}}
+ )
+
+ def test_handle_inert_if_garbage_in_license_field(self):
+ """Ensure we don't do anything if unknown text is in license field."""
+ self._assert_inert_with_config(
+ {"drivers": {"nvidia": {"license-accepted": "garbage"}}}
+ )
+
+ def test_handle_inert_if_no_license_key(self):
+ """Ensure we don't do anything if no license key."""
+ self._assert_inert_with_config({"drivers": {"nvidia": {}}})
+
+ def test_handle_inert_if_no_nvidia_key(self):
+ """Ensure we don't do anything if other license accepted."""
+ self._assert_inert_with_config(
+ {"drivers": {"acme": {"license-accepted": True}}}
+ )
+
+ def test_handle_inert_if_string_given(self):
+ """Ensure we don't do anything if string refusal given."""
+ for false_value in ["no", "false", "off", "0"]:
+ self._assert_inert_with_config(
+ {"drivers": {"nvidia": {"license-accepted": false_value}}}
+ )
+
+ @mock.patch(MPATH + "install_drivers")
+ def test_handle_no_drivers_does_nothing(self, m_install_drivers):
+ """If no 'drivers' key in the config, nothing should be done."""
+ myCloud = mock.MagicMock()
+ myLog = mock.MagicMock()
+ drivers.handle("ubuntu_drivers", {"foo": "bzr"}, myCloud, myLog, None)
+ self.assertIn(
+ "Skipping module named", myLog.debug.call_args_list[0][0][0]
+ )
+ self.assertEqual(0, m_install_drivers.call_count)
+
+ @mock.patch(M_TMP_PATH)
+ @mock.patch(MPATH + "subp.subp", return_value=("", ""))
+ @mock.patch(MPATH + "subp.which", return_value=True)
+ def test_install_drivers_no_install_if_present(
+ self, m_which, m_subp, m_tmp
+ ):
+ """If 'ubuntu-drivers' is present, no package install should occur."""
+ tdir = self.tmp_dir()
+ debconf_file = os.path.join(tdir, "nvidia.template")
+ m_tmp.return_value = tdir
+ pkg_install = mock.MagicMock()
+ drivers.install_drivers(
+ self.cfg_accepted["drivers"], pkg_install_func=pkg_install
+ )
+ self.assertEqual(0, pkg_install.call_count)
+ self.assertEqual([mock.call("ubuntu-drivers")], m_which.call_args_list)
+ self.assertEqual(
+ [
+ mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
+ mock.call(self.install_gpgpu),
+ ],
+ m_subp.call_args_list,
+ )
+
+ def test_install_drivers_rejects_invalid_config(self):
+ """install_drivers should raise TypeError if not given a config dict"""
+ pkg_install = mock.MagicMock()
+ with self.assertRaisesRegex(TypeError, ".*expected dict.*"):
+ drivers.install_drivers("mystring", pkg_install_func=pkg_install)
+ self.assertEqual(0, pkg_install.call_count)
+
+ @mock.patch(M_TMP_PATH)
+ @mock.patch(MPATH + "subp.subp")
+ @mock.patch(MPATH + "subp.which", return_value=False)
+ def test_install_drivers_handles_old_ubuntu_drivers_gracefully(
+ self, m_which, m_subp, m_tmp
+ ):
+ """Older ubuntu-drivers versions should emit message and raise error"""
+ tdir = self.tmp_dir()
+ debconf_file = os.path.join(tdir, "nvidia.template")
+ m_tmp.return_value = tdir
+ myCloud = mock.MagicMock()
+
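+        # Fake subp: temp script succeeds; ubuntu-drivers reports old version.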
+ def fake_subp(cmd):
+ if cmd[0].startswith(tdir):
+ return
+ raise ProcessExecutionError(
+ stderr=OLD_UBUNTU_DRIVERS_ERROR_STDERR, exit_code=2
+ )
+
+ m_subp.side_effect = fake_subp
+
+ with self.assertRaises(Exception):
+ drivers.handle(
+ "ubuntu_drivers", self.cfg_accepted, myCloud, None, None
+ )
+ self.assertEqual(
+ [mock.call(["ubuntu-drivers-common"])],
+ myCloud.distro.install_packages.call_args_list,
+ )
+ self.assertEqual(
+ [
+ mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
+ mock.call(self.install_gpgpu),
+ ],
+ m_subp.call_args_list,
+ )
+ self.assertIn(
+ "WARNING: the available version of ubuntu-drivers is"
+ " too old to perform requested driver installation",
+ self.logs.getvalue(),
+ )
+
+
+# Sub-class TestUbuntuDrivers to run the same test cases, but with a version
+class TestUbuntuDriversWithVersion(TestUbuntuDrivers):
+ cfg_accepted = {
+ "drivers": {"nvidia": {"license-accepted": True, "version": "123"}}
+ }
+ install_gpgpu = ["ubuntu-drivers", "install", "--gpgpu", "nvidia:123"]
+
+ @mock.patch(M_TMP_PATH)
+ @mock.patch(MPATH + "subp.subp", return_value=("", ""))
+ @mock.patch(MPATH + "subp.which", return_value=False)
+ def test_version_none_uses_latest(self, m_which, m_subp, m_tmp):
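+        """A version of None installs without pinning a driver version."""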
+ tdir = self.tmp_dir()
+ debconf_file = os.path.join(tdir, "nvidia.template")
+ m_tmp.return_value = tdir
+ myCloud = mock.MagicMock()
+ version_none_cfg = {
+ "drivers": {"nvidia": {"license-accepted": True, "version": None}}
+ }
+ drivers.handle("ubuntu_drivers", version_none_cfg, myCloud, None, None)
+ self.assertEqual(
+ [
+ mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
+ mock.call(["ubuntu-drivers", "install", "--gpgpu", "nvidia"]),
+ ],
+ m_subp.call_args_list,
+ )
+
+ def test_specifying_a_version_doesnt_override_license_acceptance(self):
+ self._assert_inert_with_config(
+ {
+ "drivers": {
+ "nvidia": {"license-accepted": False, "version": "123"}
+ }
+ }
+ )
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_update_etc_hosts.py b/tests/unittests/config/test_cc_update_etc_hosts.py
new file mode 100644
index 00000000..2bbc16f4
--- /dev/null
+++ b/tests/unittests/config/test_cc_update_etc_hosts.py
@@ -0,0 +1,68 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import logging
+import os
+import shutil
+
+from cloudinit import cloud, distros, helpers, util
+from cloudinit.config import cc_update_etc_hosts
+from tests.unittests import helpers as t_help
+
+LOG = logging.getLogger(__name__)
+
+
+class TestHostsFile(t_help.FilesystemMockingTestCase):
+ def setUp(self):
+ super(TestHostsFile, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def _fetch_distro(self, kind):
+ cls = distros.fetch(kind)
+ paths = helpers.Paths({})
+ return cls(kind, {}, paths)
+
+ def test_write_etc_hosts_suse_localhost(self):
+ cfg = {
+ "manage_etc_hosts": "localhost",
+ "hostname": "cloud-init.test.us",
+ }
+ os.makedirs("%s/etc/" % self.tmp)
+ hosts_content = "192.168.1.1 blah.blah.us blah\n"
+ fout = open("%s/etc/hosts" % self.tmp, "w")
+ fout.write(hosts_content)
+ fout.close()
+ distro = self._fetch_distro("sles")
+ distro.hosts_fn = "%s/etc/hosts" % self.tmp
+ paths = helpers.Paths({})
+ ds = None
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ self.patchUtils(self.tmp)
+ cc_update_etc_hosts.handle("test", cfg, cc, LOG, [])
+ contents = util.load_file("%s/etc/hosts" % self.tmp)
+ if "127.0.1.1\tcloud-init.test.us\tcloud-init" not in contents:
+ self.assertIsNone("No entry for 127.0.1.1 in etc/hosts")
+ if "192.168.1.1\tblah.blah.us\tblah" not in contents:
+ self.assertIsNone("Default etc/hosts content modified")
+
+ @t_help.skipUnlessJinja()
+ def test_write_etc_hosts_suse_template(self):
+ cfg = {
+ "manage_etc_hosts": "template",
+ "hostname": "cloud-init.test.us",
+ }
+ shutil.copytree(
+ t_help.cloud_init_project_dir("templates"),
+ "%s/etc/cloud/templates" % self.tmp,
+ )
+ distro = self._fetch_distro("sles")
+ paths = helpers.Paths({})
+ paths.template_tpl = "%s" % self.tmp + "/etc/cloud/templates/%s.tmpl"
+ ds = None
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ self.patchUtils(self.tmp)
+ cc_update_etc_hosts.handle("test", cfg, cc, LOG, [])
+ contents = util.load_file("%s/etc/hosts" % self.tmp)
+ if "127.0.1.1 cloud-init.test.us cloud-init" not in contents:
+ self.assertIsNone("No entry for 127.0.1.1 in etc/hosts")
+ if "::1 cloud-init.test.us cloud-init" not in contents:
+            self.assertIsNone("No entry for ::1 in etc/hosts")
diff --git a/tests/unittests/config/test_cc_users_groups.py b/tests/unittests/config/test_cc_users_groups.py
new file mode 100644
index 00000000..0bd3c980
--- /dev/null
+++ b/tests/unittests/config/test_cc_users_groups.py
@@ -0,0 +1,268 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+
+from cloudinit.config import cc_users_groups
+from tests.unittests.helpers import CiTestCase, mock
+
+MODPATH = "cloudinit.config.cc_users_groups"
+
+
+@mock.patch("cloudinit.distros.ubuntu.Distro.create_group")
+@mock.patch("cloudinit.distros.ubuntu.Distro.create_user")
+class TestHandleUsersGroups(CiTestCase):
+ """Test cc_users_groups handling of config."""
+
+ with_logs = True
+
+ def test_handle_no_cfg_creates_no_users_or_groups(self, m_user, m_group):
+ """Test handle with no config will not create users or groups."""
+ cfg = {} # merged cloud-config
+ # System config defines a default user for the distro.
+ sys_cfg = {
+ "default_user": {
+ "name": "ubuntu",
+ "lock_passwd": True,
+ "groups": ["lxd", "sudo"],
+ "shell": "/bin/bash",
+ }
+ }
+ metadata = {}
+ cloud = self.tmp_cloud(
+ distro="ubuntu", sys_cfg=sys_cfg, metadata=metadata
+ )
+ cc_users_groups.handle("modulename", cfg, cloud, None, None)
+ m_user.assert_not_called()
+ m_group.assert_not_called()
+
+ def test_handle_users_in_cfg_calls_create_users(self, m_user, m_group):
+ """When users in config, create users with distro.create_user."""
+ cfg = {"users": ["default", {"name": "me2"}]} # merged cloud-config
+ # System config defines a default user for the distro.
+ sys_cfg = {
+ "default_user": {
+ "name": "ubuntu",
+ "lock_passwd": True,
+ "groups": ["lxd", "sudo"],
+ "shell": "/bin/bash",
+ }
+ }
+ metadata = {}
+ cloud = self.tmp_cloud(
+ distro="ubuntu", sys_cfg=sys_cfg, metadata=metadata
+ )
+ cc_users_groups.handle("modulename", cfg, cloud, None, None)
+ self.assertCountEqual(
+ m_user.call_args_list,
+ [
+ mock.call(
+ "ubuntu",
+ groups="lxd,sudo",
+ lock_passwd=True,
+ shell="/bin/bash",
+ ),
+ mock.call("me2", default=False),
+ ],
+ )
+ m_group.assert_not_called()
+
+ @mock.patch("cloudinit.distros.freebsd.Distro.create_group")
+ @mock.patch("cloudinit.distros.freebsd.Distro.create_user")
+ def test_handle_users_in_cfg_calls_create_users_on_bsd(
+ self,
+ m_fbsd_user,
+ m_fbsd_group,
+ m_linux_user,
+ m_linux_group,
+ ):
+ """When users in config, create users with freebsd.create_user."""
+ cfg = {"users": ["default", {"name": "me2"}]} # merged cloud-config
+ # System config defines a default user for the distro.
+ sys_cfg = {
+ "default_user": {
+ "name": "freebsd",
+ "lock_passwd": True,
+ "groups": ["wheel"],
+ "shell": "/bin/tcsh",
+ }
+ }
+ metadata = {}
+ cloud = self.tmp_cloud(
+ distro="freebsd", sys_cfg=sys_cfg, metadata=metadata
+ )
+ cc_users_groups.handle("modulename", cfg, cloud, None, None)
+ self.assertCountEqual(
+ m_fbsd_user.call_args_list,
+ [
+ mock.call(
+ "freebsd",
+ groups="wheel",
+ lock_passwd=True,
+ shell="/bin/tcsh",
+ ),
+ mock.call("me2", default=False),
+ ],
+ )
+ m_fbsd_group.assert_not_called()
+ m_linux_group.assert_not_called()
+ m_linux_user.assert_not_called()
+
+ def test_users_with_ssh_redirect_user_passes_keys(self, m_user, m_group):
+ """When ssh_redirect_user is True pass default user and cloud keys."""
+ cfg = {
+ "users": ["default", {"name": "me2", "ssh_redirect_user": True}]
+ }
+ # System config defines a default user for the distro.
+ sys_cfg = {
+ "default_user": {
+ "name": "ubuntu",
+ "lock_passwd": True,
+ "groups": ["lxd", "sudo"],
+ "shell": "/bin/bash",
+ }
+ }
+ metadata = {"public-keys": ["key1"]}
+ cloud = self.tmp_cloud(
+ distro="ubuntu", sys_cfg=sys_cfg, metadata=metadata
+ )
+ cc_users_groups.handle("modulename", cfg, cloud, None, None)
+ self.assertCountEqual(
+ m_user.call_args_list,
+ [
+ mock.call(
+ "ubuntu",
+ groups="lxd,sudo",
+ lock_passwd=True,
+ shell="/bin/bash",
+ ),
+ mock.call(
+ "me2",
+ cloud_public_ssh_keys=["key1"],
+ default=False,
+ ssh_redirect_user="ubuntu",
+ ),
+ ],
+ )
+ m_group.assert_not_called()
+
+ def test_users_with_ssh_redirect_user_default_str(self, m_user, m_group):
+ """When ssh_redirect_user is 'default' pass default username."""
+ cfg = {
+ "users": [
+ "default",
+ {"name": "me2", "ssh_redirect_user": "default"},
+ ]
+ }
+ # System config defines a default user for the distro.
+ sys_cfg = {
+ "default_user": {
+ "name": "ubuntu",
+ "lock_passwd": True,
+ "groups": ["lxd", "sudo"],
+ "shell": "/bin/bash",
+ }
+ }
+ metadata = {"public-keys": ["key1"]}
+ cloud = self.tmp_cloud(
+ distro="ubuntu", sys_cfg=sys_cfg, metadata=metadata
+ )
+ cc_users_groups.handle("modulename", cfg, cloud, None, None)
+ self.assertCountEqual(
+ m_user.call_args_list,
+ [
+ mock.call(
+ "ubuntu",
+ groups="lxd,sudo",
+ lock_passwd=True,
+ shell="/bin/bash",
+ ),
+ mock.call(
+ "me2",
+ cloud_public_ssh_keys=["key1"],
+ default=False,
+ ssh_redirect_user="ubuntu",
+ ),
+ ],
+ )
+ m_group.assert_not_called()
+
+ def test_users_with_ssh_redirect_user_non_default(self, m_user, m_group):
+        """Error when ssh_redirect_user is not true, false or 'default'."""
+ cfg = {
+ "users": [
+ "default",
+ {"name": "me2", "ssh_redirect_user": "snowflake"},
+ ]
+ }
+ # System config defines a default user for the distro.
+ sys_cfg = {
+ "default_user": {
+ "name": "ubuntu",
+ "lock_passwd": True,
+ "groups": ["lxd", "sudo"],
+ "shell": "/bin/bash",
+ }
+ }
+ metadata = {"public-keys": ["key1"]}
+ cloud = self.tmp_cloud(
+ distro="ubuntu", sys_cfg=sys_cfg, metadata=metadata
+ )
+ with self.assertRaises(ValueError) as context_manager:
+ cc_users_groups.handle("modulename", cfg, cloud, None, None)
+ m_group.assert_not_called()
+ self.assertEqual(
+ "Not creating user me2. Invalid value of ssh_redirect_user:"
+ " snowflake. Expected values: true, default or false.",
+ str(context_manager.exception),
+ )
+
+ def test_users_with_ssh_redirect_user_default_false(self, m_user, m_group):
+ """When unspecified ssh_redirect_user is false and not set up."""
+ cfg = {"users": ["default", {"name": "me2"}]}
+ # System config defines a default user for the distro.
+ sys_cfg = {
+ "default_user": {
+ "name": "ubuntu",
+ "lock_passwd": True,
+ "groups": ["lxd", "sudo"],
+ "shell": "/bin/bash",
+ }
+ }
+ metadata = {"public-keys": ["key1"]}
+ cloud = self.tmp_cloud(
+ distro="ubuntu", sys_cfg=sys_cfg, metadata=metadata
+ )
+ cc_users_groups.handle("modulename", cfg, cloud, None, None)
+ self.assertCountEqual(
+ m_user.call_args_list,
+ [
+ mock.call(
+ "ubuntu",
+ groups="lxd,sudo",
+ lock_passwd=True,
+ shell="/bin/bash",
+ ),
+ mock.call("me2", default=False),
+ ],
+ )
+ m_group.assert_not_called()
+
+ def test_users_ssh_redirect_user_and_no_default(self, m_user, m_group):
+ """Warn when ssh_redirect_user is True and no default user present."""
+ cfg = {
+ "users": ["default", {"name": "me2", "ssh_redirect_user": True}]
+ }
+ # System config defines *no* default user for the distro.
+ sys_cfg = {}
+ metadata = {} # no public-keys defined
+ cloud = self.tmp_cloud(
+ distro="ubuntu", sys_cfg=sys_cfg, metadata=metadata
+ )
+ cc_users_groups.handle("modulename", cfg, cloud, None, None)
+ m_user.assert_called_once_with("me2", default=False)
+ m_group.assert_not_called()
+ self.assertEqual(
+ "WARNING: Ignoring ssh_redirect_user: True for me2. No"
+ " default_user defined. Perhaps missing"
+ " cloud configuration users: [default, ..].\n",
+ self.logs.getvalue(),
+ )
diff --git a/tests/unittests/test_handler/test_handler_write_files.py b/tests/unittests/config/test_cc_write_files.py
index 727681d3..faea5885 100644
--- a/tests/unittests/test_handler/test_handler_write_files.py
+++ b/tests/unittests/config/test_cc_write_files.py
@@ -7,13 +7,15 @@ import io
import shutil
import tempfile
-from cloudinit.config.cc_write_files import (
- handle, decode_perms, write_files)
from cloudinit import log as logging
from cloudinit import util
-
-from cloudinit.tests.helpers import (
- CiTestCase, FilesystemMockingTestCase, mock, skipUnlessJsonSchema)
+from cloudinit.config.cc_write_files import decode_perms, handle, write_files
+from tests.unittests.helpers import (
+ CiTestCase,
+ FilesystemMockingTestCase,
+ mock,
+ skipUnlessJsonSchema,
+)
LOG = logging.getLogger(__name__)
@@ -35,73 +37,91 @@ write_files:
"""
YAML_CONTENT_EXPECTED = {
- '/usr/bin/hello': "#!/bin/sh\necho hello world\n",
- '/wark': "foobar\n",
- '/tmp/message': "hi mom line 1\nhi mom line 2\n",
+ "/usr/bin/hello": "#!/bin/sh\necho hello world\n",
+ "/wark": "foobar\n",
+ "/tmp/message": "hi mom line 1\nhi mom line 2\n",
}
VALID_SCHEMA = {
- 'write_files': [
- {'append': False, 'content': 'a', 'encoding': 'gzip', 'owner': 'jeff',
- 'path': '/some', 'permissions': '0777'}
+ "write_files": [
+ {
+ "append": False,
+ "content": "a",
+ "encoding": "gzip",
+ "owner": "jeff",
+ "path": "/some",
+ "permissions": "0777",
+ }
]
}
INVALID_SCHEMA = { # Dropped required path key
- 'write_files': [
- {'append': False, 'content': 'a', 'encoding': 'gzip', 'owner': 'jeff',
- 'permissions': '0777'}
+ "write_files": [
+ {
+ "append": False,
+ "content": "a",
+ "encoding": "gzip",
+ "owner": "jeff",
+ "permissions": "0777",
+ }
]
}
@skipUnlessJsonSchema()
-@mock.patch('cloudinit.config.cc_write_files.write_files')
+@mock.patch("cloudinit.config.cc_write_files.write_files")
class TestWriteFilesSchema(CiTestCase):
with_logs = True
def test_schema_validation_warns_missing_path(self, m_write_files):
"""The only required file item property is 'path'."""
- cc = self.tmp_cloud('ubuntu')
- valid_config = {'write_files': [{'path': '/some/path'}]}
- handle('cc_write_file', valid_config, cc, LOG, [])
- self.assertNotIn('Invalid config:', self.logs.getvalue())
- handle('cc_write_file', INVALID_SCHEMA, cc, LOG, [])
- self.assertIn('Invalid config:', self.logs.getvalue())
+ cc = self.tmp_cloud("ubuntu")
+ valid_config = {"write_files": [{"path": "/some/path"}]}
+ handle("cc_write_file", valid_config, cc, LOG, [])
+ self.assertNotIn(
+ "Invalid cloud-config provided:", self.logs.getvalue()
+ )
+ handle("cc_write_file", INVALID_SCHEMA, cc, LOG, [])
+ self.assertIn("Invalid cloud-config provided:", self.logs.getvalue())
self.assertIn("'path' is a required property", self.logs.getvalue())
def test_schema_validation_warns_non_string_type_for_files(
- self, m_write_files):
+ self, m_write_files
+ ):
"""Schema validation warns of non-string values for each file item."""
- cc = self.tmp_cloud('ubuntu')
- for key in VALID_SCHEMA['write_files'][0].keys():
- if key == 'append':
- key_type = 'boolean'
+ cc = self.tmp_cloud("ubuntu")
+ for key in VALID_SCHEMA["write_files"][0].keys():
+ if key == "append":
+ key_type = "boolean"
else:
- key_type = 'string'
+ key_type = "string"
invalid_config = copy.deepcopy(VALID_SCHEMA)
- invalid_config['write_files'][0][key] = 1
- handle('cc_write_file', invalid_config, cc, LOG, [])
+ invalid_config["write_files"][0][key] = 1
+ handle("cc_write_file", invalid_config, cc, LOG, [])
self.assertIn(
- mock.call('cc_write_file', invalid_config['write_files']),
- m_write_files.call_args_list)
+ mock.call("cc_write_file", invalid_config["write_files"]),
+ m_write_files.call_args_list,
+ )
self.assertIn(
- 'write_files.0.%s: 1 is not of type \'%s\'' % (key, key_type),
- self.logs.getvalue())
- self.assertIn('Invalid config:', self.logs.getvalue())
+ "write_files.0.%s: 1 is not of type '%s'" % (key, key_type),
+ self.logs.getvalue(),
+ )
+ self.assertIn("Invalid cloud-config provided:", self.logs.getvalue())
def test_schema_validation_warns_on_additional_undefined_properties(
- self, m_write_files):
+ self, m_write_files
+ ):
"""Schema validation warns on additional undefined file properties."""
- cc = self.tmp_cloud('ubuntu')
+ cc = self.tmp_cloud("ubuntu")
invalid_config = copy.deepcopy(VALID_SCHEMA)
- invalid_config['write_files'][0]['bogus'] = 'value'
- handle('cc_write_file', invalid_config, cc, LOG, [])
+ invalid_config["write_files"][0]["bogus"] = "value"
+ handle("cc_write_file", invalid_config, cc, LOG, [])
self.assertIn(
- "Invalid config:\nwrite_files.0: Additional properties"
- " are not allowed ('bogus' was unexpected)",
- self.logs.getvalue())
+ "Invalid cloud-config provided:\nwrite_files.0: Additional"
+ " properties are not allowed ('bogus' was unexpected)",
+ self.logs.getvalue(),
+ )
class TestWriteFiles(FilesystemMockingTestCase):
@@ -116,20 +136,21 @@ class TestWriteFiles(FilesystemMockingTestCase):
@skipUnlessJsonSchema()
def test_handler_schema_validation_warns_non_array_type(self):
"""Schema validation warns of non-array value."""
- invalid_config = {'write_files': 1}
- cc = self.tmp_cloud('ubuntu')
+ invalid_config = {"write_files": 1}
+ cc = self.tmp_cloud("ubuntu")
with self.assertRaises(TypeError):
- handle('cc_write_file', invalid_config, cc, LOG, [])
+ handle("cc_write_file", invalid_config, cc, LOG, [])
self.assertIn(
- 'Invalid config:\nwrite_files: 1 is not of type \'array\'',
- self.logs.getvalue())
+ "Invalid cloud-config provided:\nwrite_files: 1 is not of type"
+ " 'array'",
+ self.logs.getvalue(),
+ )
def test_simple(self):
self.patchUtils(self.tmp)
expected = "hello world\n"
filename = "/tmp/my.file"
- write_files(
- "test_simple", [{"content": expected, "path": filename}])
+ write_files("test_simple", [{"content": expected, "path": filename}])
self.assertEqual(util.load_file(filename), expected)
def test_append(self):
@@ -141,13 +162,14 @@ class TestWriteFiles(FilesystemMockingTestCase):
util.write_file(filename, existing)
write_files(
"test_append",
- [{"content": added, "path": filename, "append": "true"}])
+ [{"content": added, "path": filename, "append": "true"}],
+ )
self.assertEqual(util.load_file(filename), expected)
def test_yaml_binary(self):
self.patchUtils(self.tmp)
data = util.load_yaml(YAML_TEXT)
- write_files("testname", data['write_files'])
+ write_files("testname", data["write_files"])
for path, content in YAML_CONTENT_EXPECTED.items():
self.assertEqual(util.load_file(path), content)
@@ -158,13 +180,13 @@ class TestWriteFiles(FilesystemMockingTestCase):
# for 'gz', 'gzip', 'gz+base64' ...
data = b"foobzr"
utf8_valid = b"foobzr"
- utf8_invalid = b'ab\xaadef'
+ utf8_invalid = b"ab\xaadef"
files = []
expected = []
- gz_aliases = ('gz', 'gzip')
- gz_b64_aliases = ('gz+base64', 'gzip+base64', 'gz+b64', 'gzip+b64')
- b64_aliases = ('base64', 'b64')
+ gz_aliases = ("gz", "gzip")
+ gz_b64_aliases = ("gz+base64", "gzip+base64", "gz+b64", "gzip+b64")
+ b64_aliases = ("base64", "b64")
datum = (("utf8", utf8_valid), ("no-utf8", utf8_invalid))
for name, data in datum:
@@ -173,11 +195,13 @@ class TestWriteFiles(FilesystemMockingTestCase):
b64 = (base64.b64encode(data), b64_aliases)
for content, aliases in (gz, gz_b64, b64):
for enc in aliases:
- cur = {'content': content,
- 'path': '/tmp/file-%s-%s' % (name, enc),
- 'encoding': enc}
+ cur = {
+ "content": content,
+ "path": "/tmp/file-%s-%s" % (name, enc),
+ "encoding": enc,
+ }
files.append(cur)
- expected.append((cur['path'], data))
+ expected.append((cur["path"], data))
write_files("test_decoding", files)
@@ -185,10 +209,20 @@ class TestWriteFiles(FilesystemMockingTestCase):
self.assertEqual(util.load_file(path, decode=False), content)
# make sure we actually wrote *some* files.
- flen_expected = (
- len(gz_aliases + gz_b64_aliases + b64_aliases) * len(datum))
+ flen_expected = len(gz_aliases + gz_b64_aliases + b64_aliases) * len(
+ datum
+ )
self.assertEqual(len(expected), flen_expected)
+ def test_deferred(self):
+ self.patchUtils(self.tmp)
+ file_path = "/tmp/deferred.file"
+ config = {"write_files": [{"path": file_path, "defer": True}]}
+ cc = self.tmp_cloud("ubuntu")
+ handle("cc_write_file", config, cc, LOG, [])
+ with self.assertRaises(FileNotFoundError):
+ util.load_file(file_path)
+
class TestDecodePerms(CiTestCase):
diff --git a/tests/unittests/config/test_cc_write_files_deferred.py b/tests/unittests/config/test_cc_write_files_deferred.py
new file mode 100644
index 00000000..17203233
--- /dev/null
+++ b/tests/unittests/config/test_cc_write_files_deferred.py
@@ -0,0 +1,85 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import shutil
+import tempfile
+
+from cloudinit import log as logging
+from cloudinit import util
+from cloudinit.config.cc_write_files_deferred import handle
+from tests.unittests.helpers import (
+ CiTestCase,
+ FilesystemMockingTestCase,
+ mock,
+ skipUnlessJsonSchema,
+)
+
+from .test_cc_write_files import VALID_SCHEMA
+
+LOG = logging.getLogger(__name__)
+
+
+@skipUnlessJsonSchema()
+@mock.patch("cloudinit.config.cc_write_files_deferred.write_files")
+class TestWriteFilesDeferredSchema(CiTestCase):
+
+ with_logs = True
+
+ def test_schema_validation_warns_invalid_value(
+ self, m_write_files_deferred
+ ):
+ """If 'defer' is defined, it must be of type 'bool'."""
+
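+ # Reuse the base VALID_SCHEMA entry and only toggle the "defer" field
+ # for each case.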
+ valid_config = {
+ "write_files": [
+ {**VALID_SCHEMA.get("write_files")[0], "defer": True}
+ ]
+ }
+
+ invalid_config = {
+ "write_files": [
+ {**VALID_SCHEMA.get("write_files")[0], "defer": str("no")}
+ ]
+ }
+
+ cc = self.tmp_cloud("ubuntu")
+ handle("cc_write_files_deferred", valid_config, cc, LOG, [])
+ self.assertNotIn(
+ "Invalid cloud-config provided:", self.logs.getvalue()
+ )
+ handle("cc_write_files_deferred", invalid_config, cc, LOG, [])
+ self.assertIn("Invalid cloud-config provided:", self.logs.getvalue())
+ self.assertIn(
+ "defer: 'no' is not of type 'boolean'", self.logs.getvalue()
+ )
+
+
+class TestWriteFilesDeferred(FilesystemMockingTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestWriteFilesDeferred, self).setUp()
+ self.tmp = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, self.tmp)
+
+ def test_filtering_deferred_files(self):
+ self.patchUtils(self.tmp)
+ expected = "hello world\n"
+ config = {
+ "write_files": [
+ {
+ "path": "/tmp/deferred.file",
+ "defer": True,
+ "content": expected,
+ },
+ {"path": "/tmp/not_deferred.file"},
+ ]
+ }
+ cc = self.tmp_cloud("ubuntu")
+ handle("cc_write_files_deferred", config, cc, LOG, [])
+ self.assertEqual(util.load_file("/tmp/deferred.file"), expected)
+ with self.assertRaises(FileNotFoundError):
+ util.load_file("/tmp/not_deferred.file")
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_yum_add_repo.py b/tests/unittests/config/test_cc_yum_add_repo.py
new file mode 100644
index 00000000..550b0af2
--- /dev/null
+++ b/tests/unittests/config/test_cc_yum_add_repo.py
@@ -0,0 +1,120 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import configparser
+import logging
+import shutil
+import tempfile
+
+from cloudinit import util
+from cloudinit.config import cc_yum_add_repo
+from tests.unittests import helpers
+
+LOG = logging.getLogger(__name__)
+
+
+class TestConfig(helpers.FilesystemMockingTestCase):
+ def setUp(self):
+ super(TestConfig, self).setUp()
+ self.tmp = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, self.tmp)
+
+ def test_bad_config(self):
+ cfg = {
+ "yum_repos": {
+ "epel-testing": {
+ "name": "Extra Packages for Enterprise Linux 5 - Testing",
+ # Missing this should cause the repo not to be written
+ # 'baseurl': 'http://blah.org/pub/epel/testing/5/$barch',
+ "enabled": False,
+ "gpgcheck": True,
+ "gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL",
+ "failovermethod": "priority",
+ },
+ },
+ }
+ self.patchUtils(self.tmp)
+ cc_yum_add_repo.handle("yum_add_repo", cfg, None, LOG, [])
+ self.assertRaises(
+ IOError, util.load_file, "/etc/yum.repos.d/epel_testing.repo"
+ )
+
+ def test_write_config(self):
+ cfg = {
+ "yum_repos": {
+ "epel-testing": {
+ "name": "Extra Packages for Enterprise Linux 5 - Testing",
+ "baseurl": "http://blah.org/pub/epel/testing/5/$basearch",
+ "enabled": False,
+ "gpgcheck": True,
+ "gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL",
+ "failovermethod": "priority",
+ },
+ },
+ }
+ self.patchUtils(self.tmp)
+ cc_yum_add_repo.handle("yum_add_repo", cfg, None, LOG, [])
+ contents = util.load_file("/etc/yum.repos.d/epel_testing.repo")
+ parser = configparser.ConfigParser()
+ parser.read_string(contents)
+ expected = {
+ "epel_testing": {
+ "name": "Extra Packages for Enterprise Linux 5 - Testing",
+ "failovermethod": "priority",
+ "gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL",
+ "enabled": "0",
+ "baseurl": "http://blah.org/pub/epel/testing/5/$basearch",
+ "gpgcheck": "1",
+ }
+ }
+ for section in expected:
+ self.assertTrue(
+ parser.has_section(section),
+ "Contains section {0}".format(section),
+ )
+ for k, v in expected[section].items():
+ self.assertEqual(parser.get(section, k), v)
+
+ def test_write_config_array(self):
+ cfg = {
+ "yum_repos": {
+ "puppetlabs-products": {
+ "name": "Puppet Labs Products El 6 - $basearch",
+ "baseurl": (
+ "http://yum.puppetlabs.com/el/6/products/$basearch"
+ ),
+ "gpgkey": [
+ "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppetlabs",
+ "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppet",
+ ],
+ "enabled": True,
+ "gpgcheck": True,
+ }
+ }
+ }
+ self.patchUtils(self.tmp)
+ cc_yum_add_repo.handle("yum_add_repo", cfg, None, LOG, [])
+ contents = util.load_file("/etc/yum.repos.d/puppetlabs_products.repo")
+ parser = configparser.ConfigParser()
+ parser.read_string(contents)
+ expected = {
+ "puppetlabs_products": {
+ "name": "Puppet Labs Products El 6 - $basearch",
+ "baseurl": "http://yum.puppetlabs.com/el/6/products/$basearch",
+ "gpgkey": (
+ "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppetlabs\n"
+ "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppet"
+ ),
+ "enabled": "1",
+ "gpgcheck": "1",
+ }
+ }
+ for section in expected:
+ self.assertTrue(
+ parser.has_section(section),
+ "Contains section {0}".format(section),
+ )
+ for k, v in expected[section].items():
+ self.assertEqual(parser.get(section, k), v)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_zypper_add_repo.py b/tests/unittests/config/test_cc_zypper_add_repo.py
index 0fb1de1a..4304fee1 100644
--- a/tests/unittests/test_handler/test_handler_zypper_add_repo.py
+++ b/tests/unittests/config/test_cc_zypper_add_repo.py
@@ -7,8 +7,8 @@ import os
from cloudinit import util
from cloudinit.config import cc_zypper_add_repo
-from cloudinit.tests import helpers
-from cloudinit.tests.helpers import mock
+from tests.unittests import helpers
+from tests.unittests.helpers import mock
LOG = logging.getLogger(__name__)
@@ -17,31 +17,28 @@ class TestConfig(helpers.FilesystemMockingTestCase):
def setUp(self):
super(TestConfig, self).setUp()
self.tmp = self.tmp_dir()
- self.zypp_conf = 'etc/zypp/zypp.conf'
+ self.zypp_conf = "etc/zypp/zypp.conf"
def test_bad_repo_config(self):
"""Config has no baseurl, no file should be written"""
cfg = {
- 'repos': [
- {
- 'id': 'foo',
- 'name': 'suse-test',
- 'enabled': '1'
- },
+ "repos": [
+ {"id": "foo", "name": "suse-test", "enabled": "1"},
]
}
self.patchUtils(self.tmp)
- cc_zypper_add_repo._write_repos(cfg['repos'], '/etc/zypp/repos.d')
- self.assertRaises(IOError, util.load_file,
- "/etc/zypp/repos.d/foo.repo")
+ cc_zypper_add_repo._write_repos(cfg["repos"], "/etc/zypp/repos.d")
+ self.assertRaises(
+ IOError, util.load_file, "/etc/zypp/repos.d/foo.repo"
+ )
def test_write_repos(self):
"""Verify valid repos get written"""
cfg = self._get_base_config_repos()
root_d = self.tmp_dir()
- cc_zypper_add_repo._write_repos(cfg['zypper']['repos'], root_d)
- repos = glob.glob('%s/*.repo' % root_d)
- expected_repos = ['testing-foo.repo', 'testing-bar.repo']
+ cc_zypper_add_repo._write_repos(cfg["zypper"]["repos"], root_d)
+ repos = glob.glob("%s/*.repo" % root_d)
+ expected_repos = ["testing-foo.repo", "testing-bar.repo"]
if len(repos) != 2:
assert 'Number of repos written is "%d" expected 2' % len(repos)
for repo in repos:
@@ -53,80 +50,77 @@ class TestConfig(helpers.FilesystemMockingTestCase):
def test_write_repo(self):
"""Verify the content of a repo file"""
cfg = {
- 'repos': [
+ "repos": [
{
- 'baseurl': 'http://foo',
- 'name': 'test-foo',
- 'id': 'testing-foo'
+ "baseurl": "http://foo",
+ "name": "test-foo",
+ "id": "testing-foo",
},
]
}
root_d = self.tmp_dir()
- cc_zypper_add_repo._write_repos(cfg['repos'], root_d)
+ cc_zypper_add_repo._write_repos(cfg["repos"], root_d)
contents = util.load_file("%s/testing-foo.repo" % root_d)
parser = configparser.ConfigParser()
parser.read_string(contents)
expected = {
- 'testing-foo': {
- 'name': 'test-foo',
- 'baseurl': 'http://foo',
- 'enabled': '1',
- 'autorefresh': '1'
+ "testing-foo": {
+ "name": "test-foo",
+ "baseurl": "http://foo",
+ "enabled": "1",
+ "autorefresh": "1",
}
}
for section in expected:
- self.assertTrue(parser.has_section(section),
- "Contains section {0}".format(section))
+ self.assertTrue(
+ parser.has_section(section),
+ "Contains section {0}".format(section),
+ )
for k, v in expected[section].items():
self.assertEqual(parser.get(section, k), v)
def test_config_write(self):
"""Write valid configuration data"""
- cfg = {
- 'config': {
- 'download.deltarpm': 'False',
- 'reposdir': 'foo'
- }
- }
+ cfg = {"config": {"download.deltarpm": "False", "reposdir": "foo"}}
root_d = self.tmp_dir()
- helpers.populate_dir(root_d, {self.zypp_conf: '# Zypp config\n'})
+ helpers.populate_dir(root_d, {self.zypp_conf: "# Zypp config\n"})
self.reRoot(root_d)
- cc_zypper_add_repo._write_zypp_config(cfg['config'])
+ cc_zypper_add_repo._write_zypp_config(cfg["config"])
cfg_out = os.path.join(root_d, self.zypp_conf)
contents = util.load_file(cfg_out)
expected = [
- '# Zypp config',
- '# Added via cloud.cfg',
- 'download.deltarpm=False',
- 'reposdir=foo'
+ "# Zypp config",
+ "# Added via cloud.cfg",
+ "download.deltarpm=False",
+ "reposdir=foo",
]
- for item in contents.split('\n'):
+ for item in contents.split("\n"):
if item not in expected:
self.assertIsNone(item)
- @mock.patch('cloudinit.log.logging')
+ @mock.patch("cloudinit.log.logging")
def test_config_write_skip_configdir(self, mock_logging):
"""Write configuration but skip writing 'configdir' setting"""
cfg = {
- 'config': {
- 'download.deltarpm': 'False',
- 'reposdir': 'foo',
- 'configdir': 'bar'
+ "config": {
+ "download.deltarpm": "False",
+ "reposdir": "foo",
+ "configdir": "bar",
}
}
root_d = self.tmp_dir()
- helpers.populate_dir(root_d, {self.zypp_conf: '# Zypp config\n'})
+ helpers.populate_dir(root_d, {self.zypp_conf: "# Zypp config\n"})
self.reRoot(root_d)
- cc_zypper_add_repo._write_zypp_config(cfg['config'])
+ cc_zypper_add_repo._write_zypp_config(cfg["config"])
cfg_out = os.path.join(root_d, self.zypp_conf)
contents = util.load_file(cfg_out)
expected = [
- '# Zypp config',
- '# Added via cloud.cfg',
- 'download.deltarpm=False',
- 'reposdir=foo'
+ "# Zypp config",
+ "# Added via cloud.cfg",
+ "download.deltarpm=False",
+ "reposdir=foo",
]
- for item in contents.split('\n'):
+ for item in contents.split("\n"):
if item not in expected:
self.assertIsNone(item)
# Not finding the right path for mocking :(
@@ -134,55 +128,53 @@ class TestConfig(helpers.FilesystemMockingTestCase):
def test_empty_config_section_no_new_data(self):
"""When the config section is empty no new data should be written to
- zypp.conf"""
+ zypp.conf"""
cfg = self._get_base_config_repos()
- cfg['zypper']['config'] = None
+ cfg["zypper"]["config"] = None
root_d = self.tmp_dir()
- helpers.populate_dir(root_d, {self.zypp_conf: '# No data'})
+ helpers.populate_dir(root_d, {self.zypp_conf: "# No data"})
self.reRoot(root_d)
- cc_zypper_add_repo._write_zypp_config(cfg.get('config', {}))
+ cc_zypper_add_repo._write_zypp_config(cfg.get("config", {}))
cfg_out = os.path.join(root_d, self.zypp_conf)
contents = util.load_file(cfg_out)
- self.assertEqual(contents, '# No data')
+ self.assertEqual(contents, "# No data")
def test_empty_config_value_no_new_data(self):
"""When the config section is not empty but there are no values
- no new data should be written to zypp.conf"""
+ no new data should be written to zypp.conf"""
cfg = self._get_base_config_repos()
- cfg['zypper']['config'] = {
- 'download.deltarpm': None
- }
+ cfg["zypper"]["config"] = {"download.deltarpm": None}
root_d = self.tmp_dir()
- helpers.populate_dir(root_d, {self.zypp_conf: '# No data'})
+ helpers.populate_dir(root_d, {self.zypp_conf: "# No data"})
self.reRoot(root_d)
- cc_zypper_add_repo._write_zypp_config(cfg.get('config', {}))
+ cc_zypper_add_repo._write_zypp_config(cfg.get("config", {}))
cfg_out = os.path.join(root_d, self.zypp_conf)
contents = util.load_file(cfg_out)
- self.assertEqual(contents, '# No data')
+ self.assertEqual(contents, "# No data")
def test_handler_full_setup(self):
"""Test that the handler ends up calling the renderers"""
cfg = self._get_base_config_repos()
- cfg['zypper']['config'] = {
- 'download.deltarpm': 'False',
+ cfg["zypper"]["config"] = {
+ "download.deltarpm": "False",
}
root_d = self.tmp_dir()
- os.makedirs('%s/etc/zypp/repos.d' % root_d)
- helpers.populate_dir(root_d, {self.zypp_conf: '# Zypp config\n'})
+ os.makedirs("%s/etc/zypp/repos.d" % root_d)
+ helpers.populate_dir(root_d, {self.zypp_conf: "# Zypp config\n"})
self.reRoot(root_d)
- cc_zypper_add_repo.handle('zypper_add_repo', cfg, None, LOG, [])
+ cc_zypper_add_repo.handle("zypper_add_repo", cfg, None, LOG, [])
cfg_out = os.path.join(root_d, self.zypp_conf)
contents = util.load_file(cfg_out)
expected = [
- '# Zypp config',
- '# Added via cloud.cfg',
- 'download.deltarpm=False',
+ "# Zypp config",
+ "# Added via cloud.cfg",
+ "download.deltarpm=False",
]
- for item in contents.split('\n'):
+ for item in contents.split("\n"):
if item not in expected:
self.assertIsNone(item)
- repos = glob.glob('%s/etc/zypp/repos.d/*.repo' % root_d)
- expected_repos = ['testing-foo.repo', 'testing-bar.repo']
+ repos = glob.glob("%s/etc/zypp/repos.d/*.repo" % root_d)
+ expected_repos = ["testing-foo.repo", "testing-bar.repo"]
if len(repos) != 2:
assert 'Number of repos written is "%d" expected 2' % len(repos)
for repo in repos:
@@ -192,39 +184,39 @@ class TestConfig(helpers.FilesystemMockingTestCase):
def test_no_config_section_no_new_data(self):
"""When there is no config section no new data should be written to
- zypp.conf"""
+ zypp.conf"""
cfg = self._get_base_config_repos()
root_d = self.tmp_dir()
- helpers.populate_dir(root_d, {self.zypp_conf: '# No data'})
+ helpers.populate_dir(root_d, {self.zypp_conf: "# No data"})
self.reRoot(root_d)
- cc_zypper_add_repo._write_zypp_config(cfg.get('config', {}))
+ cc_zypper_add_repo._write_zypp_config(cfg.get("config", {}))
cfg_out = os.path.join(root_d, self.zypp_conf)
contents = util.load_file(cfg_out)
- self.assertEqual(contents, '# No data')
+ self.assertEqual(contents, "# No data")
def test_no_repo_data(self):
"""When there is no repo data nothing should happen"""
root_d = self.tmp_dir()
self.reRoot(root_d)
cc_zypper_add_repo._write_repos(None, root_d)
- content = glob.glob('%s/*' % root_d)
+ content = glob.glob("%s/*" % root_d)
self.assertEqual(len(content), 0)
def _get_base_config_repos(self):
"""Basic valid repo configuration"""
cfg = {
- 'zypper': {
- 'repos': [
+ "zypper": {
+ "repos": [
{
- 'baseurl': 'http://foo',
- 'name': 'test-foo',
- 'id': 'testing-foo'
+ "baseurl": "http://foo",
+ "name": "test-foo",
+ "id": "testing-foo",
},
{
- 'baseurl': 'http://bar',
- 'name': 'test-bar',
- 'id': 'testing-bar'
- }
+ "baseurl": "http://bar",
+ "name": "test-bar",
+ "id": "testing-bar",
+ },
]
}
}
diff --git a/tests/unittests/config/test_schema.py b/tests/unittests/config/test_schema.py
new file mode 100644
index 00000000..3a39f343
--- /dev/null
+++ b/tests/unittests/config/test_schema.py
@@ -0,0 +1,917 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+
+import importlib
+import inspect
+import itertools
+import logging
+import sys
+from copy import copy
+from pathlib import Path
+from textwrap import dedent
+
+import pytest
+import yaml
+from yaml import safe_load
+
+from cloudinit.config.schema import (
+ CLOUD_CONFIG_HEADER,
+ MetaSchema,
+ SchemaValidationError,
+ _schemapath_for_cloudconfig,
+ annotated_cloudconfig_file,
+ get_jsonschema_validator,
+ get_meta_doc,
+ get_schema,
+ load_doc,
+ main,
+ validate_cloudconfig_file,
+ validate_cloudconfig_metaschema,
+ validate_cloudconfig_schema,
+)
+from cloudinit.util import write_file
+from tests.unittests.helpers import (
+ CiTestCase,
+ cloud_init_project_dir,
+ mock,
+ skipUnlessJsonSchema,
+)
+
+
+def get_schemas() -> dict:
+ """Return all legacy module schemas
+
+ Assumes that module schemas have the variable name "schema"
+ """
+ return get_module_variable("schema")
+
+
+def get_metas() -> dict:
+ """Return all module metas
+
+ Assumes that module metas have the variable name "meta"
+ """
+ return get_module_variable("meta")
+
+
+def get_module_variable(var_name) -> dict:
+ """Inspect modules and get variable from module matching var_name"""
+ schemas = {}
+
+ files = list(
+ Path(cloud_init_project_dir("cloudinit/config/")).glob("cc_*.py")
+ )
+
+ modules = [mod.stem for mod in files]
+
+ for module in modules:
+ importlib.import_module("cloudinit.config.{}".format(module))
+
+ for k, v in sys.modules.items():
+ path = Path(k)
+
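+ # Module keys look like "cloudinit.config.cc_foo"; Path() treats the
+ # text after the last dot as the suffix, so .stem is "cloudinit.config"
+ # and .suffix[1:] is the cc_* module name.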
+ if "cloudinit.config" == path.stem and path.suffix[1:4] == "cc_":
+ module_name = path.suffix[1:]
+ members = inspect.getmembers(v)
+ schemas[module_name] = None
+ for name, value in members:
+ if name == var_name:
+ schemas[module_name] = value
+ break
+ return schemas
+
+
+class TestGetSchema:
+ def test_get_schema_coalesces_known_schema(self):
+ """Every cloudconfig module with schema is listed in allOf keyword."""
+ schema = get_schema()
+ assert sorted(
+ [
+ "cc_apk_configure",
+ "cc_apt_configure",
+ "cc_apt_pipelining",
+ "cc_bootcmd",
+ "cc_byobu",
+ "cc_ca_certs",
+ "cc_chef",
+ "cc_debug",
+ "cc_disable_ec2_metadata",
+ "cc_disk_setup",
+ "cc_install_hotplug",
+ "cc_keyboard",
+ "cc_locale",
+ "cc_ntp",
+ "cc_resizefs",
+ "cc_resizefs_vyos",
+ "cc_runcmd",
+ "cc_snap",
+ "cc_ubuntu_advantage",
+ "cc_ubuntu_drivers",
+ "cc_write_files",
+ "cc_zypper_add_repo",
+ ]
+ ) == sorted(
+ [meta["id"] for meta in get_metas().values() if meta is not None]
+ )
+ assert "http://json-schema.org/draft-04/schema#" == schema["$schema"]
+ assert ["$defs", "$schema", "allOf"] == sorted(list(schema.keys()))
+ # New style schema should be defined in static schema file in $defs
+ expected_subschema_defs = [
+ {"$ref": "#/$defs/cc_apk_configure"},
+ {"$ref": "#/$defs/cc_apt_configure"},
+ {"$ref": "#/$defs/cc_apt_pipelining"},
+ {"$ref": "#/$defs/cc_bootcmd"},
+ {"$ref": "#/$defs/cc_byobu"},
+ {"$ref": "#/$defs/cc_ca_certs"},
+ {"$ref": "#/$defs/cc_chef"},
+ {"$ref": "#/$defs/cc_debug"},
+ {"$ref": "#/$defs/cc_disable_ec2_metadata"},
+ {"$ref": "#/$defs/cc_disk_setup"},
+ ]
+ found_subschema_defs = []
+ legacy_schema_keys = []
+ for subschema in schema["allOf"]:
+ if "$ref" in subschema:
+ found_subschema_defs.append(subschema)
+ else: # Legacy subschema sourced from cc_* module 'schema' attr
+ legacy_schema_keys.extend(subschema["properties"].keys())
+
+ assert expected_subschema_defs == found_subschema_defs
+ # This list will dwindle as we move legacy schema to new $defs
+ assert [
+ "drivers",
+ "keyboard",
+ "locale",
+ "locale_configfile",
+ "ntp",
+ "resize_rootfs",
+ "resizefs_enabled",
+ "resizefs_list",
+ "runcmd",
+ "snap",
+ "ubuntu_advantage",
+ "updates",
+ "write_files",
+ "write_files",
+ "zypper",
+ ] == sorted(legacy_schema_keys)
+
+
+class TestLoadDoc:
+
+ docs = get_module_variable("__doc__")
+
+ # TODO: Drop legacy test when all sub-schemas are in cloud-init-schema.json
+ @pytest.mark.parametrize(
+ "module_name",
+ (
+ "cc_apt_pipelining", # new style composite schema file
+ "cc_zypper_add_repo", # legacy sub-schema defined in module
+ ),
+ )
+ def test_report_docs_for_legacy_and_consolidated_schema(self, module_name):
+ doc = load_doc([module_name])
+ assert doc, "Unexpected empty docs for {}".format(module_name)
+ assert self.docs[module_name] == doc
+
+
+class Test_SchemapathForCloudconfig:
+ """Coverage tests for supported YAML formats."""
+
+ @pytest.mark.parametrize(
+ "source_content, expected",
+ (
+ (b"{}", {}), # assert empty config handled
+ # Multiple keys account for comments and whitespace lines
+ (b"#\na: va\n \nb: vb\n#\nc: vc", {"a": 2, "b": 4, "c": 6}),
+ # List items represented on correct line number
+ (b"a:\n - a1\n\n - a2\n", {"a": 1, "a.0": 2, "a.1": 4}),
+ # Nested dicts represented on correct line number
+ (b"a:\n a1:\n\n aa1: aa1v\n", {"a": 1, "a.a1": 2, "a.a1.aa1": 4}),
+ ),
+ )
+ def test_schemapaths_representatative_of_source_yaml(
+ self, source_content, expected
+ ):
+ """Validate schemapaths dict accurately represents source YAML line."""
+ cfg = yaml.safe_load(source_content)
+ assert expected == _schemapath_for_cloudconfig(
+ config=cfg, original_content=source_content
+ )
+
+
+class SchemaValidationErrorTest(CiTestCase):
+ """Test validate_cloudconfig_schema"""
+
+ def test_schema_validation_error_expects_schema_errors(self):
+ """SchemaValidationError is initialized from schema_errors."""
+ errors = (
+ ("key.path", 'unexpected key "junk"'),
+ ("key2.path", '"-123" is not a valid "hostname" format'),
+ )
+ exception = SchemaValidationError(schema_errors=errors)
+ self.assertIsInstance(exception, Exception)
+ self.assertEqual(exception.schema_errors, errors)
+ self.assertEqual(
+ 'Cloud config schema errors: key.path: unexpected key "junk", '
+ 'key2.path: "-123" is not a valid "hostname" format',
+ str(exception),
+ )
+ self.assertTrue(isinstance(exception, ValueError))
+
+
+class TestValidateCloudConfigSchema:
+ """Tests for validate_cloudconfig_schema."""
+
+ with_logs = True
+
+ @pytest.mark.parametrize(
+ "schema, call_count",
+ ((None, 1), ({"properties": {"p1": {"type": "string"}}}, 0)),
+ )
+ @skipUnlessJsonSchema()
+ @mock.patch("cloudinit.config.schema.get_schema")
+ def test_validateconfig_schema_use_full_schema_when_no_schema_param(
+ self, get_schema, schema, call_count
+ ):
+ """Use full schema when schema param is absent."""
+ get_schema.return_value = {"properties": {"p1": {"type": "string"}}}
+ kwargs = {"config": {"p1": "valid"}}
+ if schema:
+ kwargs["schema"] = schema
+ validate_cloudconfig_schema(**kwargs)
+ assert call_count == get_schema.call_count
+
+ @skipUnlessJsonSchema()
+ def test_validateconfig_schema_non_strict_emits_warnings(self, caplog):
+ """When strict is False validate_cloudconfig_schema emits warnings."""
+ schema = {"properties": {"p1": {"type": "string"}}}
+ validate_cloudconfig_schema({"p1": -1}, schema, strict=False)
+ [(module, log_level, log_msg)] = caplog.record_tuples
+ assert "cloudinit.config.schema" == module
+ assert logging.WARNING == log_level
+ assert (
+ "Invalid cloud-config provided:\np1: -1 is not of type 'string'"
+ == log_msg
+ )
+
+ @skipUnlessJsonSchema()
+ def test_validateconfig_schema_emits_warning_on_missing_jsonschema(
+ self, caplog
+ ):
+ """Warning from validate_cloudconfig_schema when missing jsonschema."""
+ schema = {"properties": {"p1": {"type": "string"}}}
+ with mock.patch.dict("sys.modules", **{"jsonschema": ImportError()}):
+ validate_cloudconfig_schema({"p1": -1}, schema, strict=True)
+ assert "Ignoring schema validation. jsonschema is not present" in (
+ caplog.text
+ )
+
+ @skipUnlessJsonSchema()
+ def test_validateconfig_schema_strict_raises_errors(self):
+ """When strict is True validate_cloudconfig_schema raises errors."""
+ schema = {"properties": {"p1": {"type": "string"}}}
+ with pytest.raises(SchemaValidationError) as context_mgr:
+ validate_cloudconfig_schema({"p1": -1}, schema, strict=True)
+ assert (
+ "Cloud config schema errors: p1: -1 is not of type 'string'"
+ == (str(context_mgr.value))
+ )
+
+ @skipUnlessJsonSchema()
+ def test_validateconfig_schema_honors_formats(self):
+ """With strict True, validate_cloudconfig_schema errors on format."""
+ schema = {"properties": {"p1": {"type": "string", "format": "email"}}}
+ with pytest.raises(SchemaValidationError) as context_mgr:
+ validate_cloudconfig_schema({"p1": "-1"}, schema, strict=True)
+ assert "Cloud config schema errors: p1: '-1' is not a 'email'" == (
+ str(context_mgr.value)
+ )
+
+ @skipUnlessJsonSchema()
+ def test_validateconfig_schema_honors_formats_strict_metaschema(self):
+ """With strict and strict_metaschema True, ensure errors on format"""
+ schema = {"properties": {"p1": {"type": "string", "format": "email"}}}
+ with pytest.raises(SchemaValidationError) as context_mgr:
+ validate_cloudconfig_schema(
+ {"p1": "-1"}, schema, strict=True, strict_metaschema=True
+ )
+ assert "Cloud config schema errors: p1: '-1' is not a 'email'" == str(
+ context_mgr.value
+ )
+
+ @skipUnlessJsonSchema()
+ def test_validateconfig_strict_metaschema_do_not_raise_exception(
+ self, caplog
+ ):
+ """With strict_metaschema=True, do not raise exceptions.
+
+ This flag is currently unused, but is intended for run-time validation.
+ This should warn, but not raise.
+ """
+ schema = {"properties": {"p1": {"types": "string", "format": "email"}}}
+ validate_cloudconfig_schema(
+ {"p1": "-1"}, schema, strict_metaschema=True
+ )
+ assert (
+ "Meta-schema validation failed, attempting to validate config"
+ in caplog.text
+ )
+
+
+class TestCloudConfigExamples:
+ metas = get_metas()
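+ # Build (module id, example) pairs so each documented example gets its
+ # own parametrized test case.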
+ params = [
+ (meta["id"], example)
+ for meta in metas.values()
+ if meta and meta.get("examples")
+ for example in meta.get("examples")
+ ]
+
+ @pytest.mark.parametrize("schema_id, example", params)
+ @skipUnlessJsonSchema()
+ def test_validateconfig_schema_of_example(self, schema_id, example):
+ """For a given example in a config module we test if it is valid
+ according to the unified schema of all config modules
+ """
+ schema = get_schema()
+ config_load = safe_load(example)
+ validate_cloudconfig_schema(config_load, schema, strict=True)
+
+
+class ValidateCloudConfigFileTest(CiTestCase):
+ """Tests for validate_cloudconfig_file."""
+
+ def setUp(self):
+ super(ValidateCloudConfigFileTest, self).setUp()
+ self.config_file = self.tmp_path("cloudcfg.yaml")
+
+ def test_validateconfig_file_error_on_absent_file(self):
+ """On absent config_path, validate_cloudconfig_file errors."""
+ with self.assertRaises(RuntimeError) as context_mgr:
+ validate_cloudconfig_file("/not/here", {})
+ self.assertEqual(
+ "Configfile /not/here does not exist", str(context_mgr.exception)
+ )
+
+ def test_validateconfig_file_error_on_invalid_header(self):
+ """On invalid header, validate_cloudconfig_file errors.
+
+ A SchemaValidationError is raised when the file doesn't begin with
+ CLOUD_CONFIG_HEADER.
+ """
+ write_file(self.config_file, "#junk")
+ with self.assertRaises(SchemaValidationError) as context_mgr:
+ validate_cloudconfig_file(self.config_file, {})
+ self.assertEqual(
+ "Cloud config schema errors: format-l1.c1: File {0} needs to begin"
+ ' with "{1}"'.format(
+ self.config_file, CLOUD_CONFIG_HEADER.decode()
+ ),
+ str(context_mgr.exception),
+ )
+
+ def test_validateconfig_file_error_on_non_yaml_scanner_error(self):
+ """On non-yaml scan issues, validate_cloudconfig_file errors."""
+ # Generate a scanner error by providing text on a single line with
+ # improper indent.
+ write_file(self.config_file, "#cloud-config\nasdf:\nasdf")
+ with self.assertRaises(SchemaValidationError) as context_mgr:
+ validate_cloudconfig_file(self.config_file, {})
+ self.assertIn(
+ "schema errors: format-l3.c1: File {0} is not valid yaml.".format(
+ self.config_file
+ ),
+ str(context_mgr.exception),
+ )
+
+ def test_validateconfig_file_error_on_non_yaml_parser_error(self):
+ """On non-yaml parser issues, validate_cloudconfig_file errors."""
+ write_file(self.config_file, "#cloud-config\n{}}")
+ with self.assertRaises(SchemaValidationError) as context_mgr:
+ validate_cloudconfig_file(self.config_file, {})
+ self.assertIn(
+ "schema errors: format-l2.c3: File {0} is not valid yaml.".format(
+ self.config_file
+ ),
+ str(context_mgr.exception),
+ )
+
+ @skipUnlessJsonSchema()
+ def test_validateconfig_file_strictly_validates_schema(self):
+ """validate_cloudconfig_file raises errors on invalid schema."""
+ schema = {"properties": {"p1": {"type": "string", "format": "string"}}}
+ write_file(self.config_file, "#cloud-config\np1: -1")
+ with self.assertRaises(SchemaValidationError) as context_mgr:
+ validate_cloudconfig_file(self.config_file, schema)
+ self.assertEqual(
+ "Cloud config schema errors: p1: -1 is not of type 'string'",
+ str(context_mgr.exception),
+ )
+
+
+class GetSchemaDocTest(CiTestCase):
+ """Tests for get_meta_doc."""
+
+ def setUp(self):
+ super(GetSchemaDocTest, self).setUp()
+ self.required_schema = {
+ "title": "title",
+ "description": "description",
+ "id": "id",
+ "name": "name",
+ "frequency": "frequency",
+ "distros": ["debian", "rhel"],
+ }
+ self.meta: MetaSchema = {
+ "title": "title",
+ "description": "description",
+ "id": "id",
+ "name": "name",
+ "frequency": "frequency",
+ "distros": ["debian", "rhel"],
+ "examples": [
+ 'ex1:\n [don\'t, expand, "this"]',
+ "ex2: true",
+ ],
+ }
+
+ def test_get_meta_doc_returns_restructured_text(self):
+ """get_meta_doc returns restructured text for a cloudinit schema."""
+ full_schema = copy(self.required_schema)
+ full_schema.update(
+ {
+ "properties": {
+ "prop1": {
+ "type": "array",
+ "description": "prop-description",
+ "items": {"type": "integer"},
+ }
+ }
+ }
+ )
+
+ doc = get_meta_doc(self.meta, full_schema)
+ self.assertEqual(
+ dedent(
+ """
+ name
+ ----
+ **Summary:** title
+
+ description
+
+ **Internal name:** ``id``
+
+ **Module frequency:** frequency
+
+ **Supported distros:** debian, rhel
+
+ **Config schema**:
+ **prop1:** (array of integer) prop-description
+
+ **Examples**::
+
+ ex1:
+ [don't, expand, "this"]
+ # --- Example2 ---
+ ex2: true
+ """
+ ),
+ doc,
+ )
+
+ def test_get_meta_doc_handles_multiple_types(self):
+ """get_meta_doc delimits multiple property types with a '/'."""
+ schema = {"properties": {"prop1": {"type": ["string", "integer"]}}}
+ self.assertIn(
+ "**prop1:** (string/integer)", get_meta_doc(self.meta, schema)
+ )
+
+ def test_get_meta_doc_handles_enum_types(self):
+ """get_meta_doc converts enum types to yaml and delimits with '/'."""
+ schema = {"properties": {"prop1": {"enum": [True, False, "stuff"]}}}
+ self.assertIn(
+ "**prop1:** (true/false/stuff)", get_meta_doc(self.meta, schema)
+ )
+
+ def test_get_meta_doc_handles_nested_oneof_property_types(self):
+ """get_meta_doc describes array items oneOf declarations in type."""
+ schema = {
+ "properties": {
+ "prop1": {
+ "type": "array",
+ "items": {
+ "oneOf": [{"type": "string"}, {"type": "integer"}]
+ },
+ }
+ }
+ }
+ self.assertIn(
+ "**prop1:** (array of (string)/(integer))",
+ get_meta_doc(self.meta, schema),
+ )
+
+ def test_get_meta_doc_handles_string_examples(self):
+ """get_meta_doc properly indented examples as a list of strings."""
+ full_schema = copy(self.required_schema)
+ full_schema.update(
+ {
+ "examples": [
+ 'ex1:\n [don\'t, expand, "this"]',
+ "ex2: true",
+ ],
+ "properties": {
+ "prop1": {
+ "type": "array",
+ "description": "prop-description",
+ "items": {"type": "integer"},
+ }
+ },
+ }
+ )
+ self.assertIn(
+ dedent(
+ """
+ **Config schema**:
+ **prop1:** (array of integer) prop-description
+
+ **Examples**::
+
+ ex1:
+ [don't, expand, "this"]
+ # --- Example2 ---
+ ex2: true
+ """
+ ),
+ get_meta_doc(self.meta, full_schema),
+ )
+
+ def test_get_meta_doc_properly_parse_description(self):
+ """get_meta_doc description properly formatted"""
+ schema = {
+ "properties": {
+ "p1": {
+ "type": "string",
+ "description": dedent(
+ """\
+ This item
+ has the
+ following options:
+
+ - option1
+ - option2
+ - option3
+
+ The default value is
+ option1"""
+ ),
+ }
+ }
+ }
+
+ self.assertIn(
+ dedent(
+ """
+ **Config schema**:
+ **p1:** (string) This item has the following options:
+
+ - option1
+ - option2
+ - option3
+
+ The default value is option1
+
+ """
+ ),
+ get_meta_doc(self.meta, schema),
+ )
+
+ def test_get_meta_doc_raises_key_errors(self):
+ """get_meta_doc raises KeyErrors on missing keys."""
+ schema = {
+ "properties": {
+ "prop1": {
+ "type": "array",
+ "items": {
+ "oneOf": [{"type": "string"}, {"type": "integer"}]
+ },
+ }
+ }
+ }
+ for key in self.meta:
+ invalid_meta = copy(self.meta)
+ invalid_meta.pop(key)
+ with self.assertRaises(KeyError) as context_mgr:
+ get_meta_doc(invalid_meta, schema)
+ self.assertIn(key, str(context_mgr.exception))
+
+ def test_label_overrides_property_name(self):
+ """get_meta_doc overrides property name with label."""
+ schema = {
+ "properties": {
+ "prop1": {
+ "type": "string",
+ "label": "label1",
+ },
+ "prop_no_label": {
+ "type": "string",
+ },
+ "prop_array": {
+ "label": "array_label",
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "some_prop": {"type": "number"},
+ },
+ },
+ },
+ },
+ "patternProperties": {
+ "^.*$": {
+ "type": "string",
+ "label": "label2",
+ }
+ },
+ }
+ meta_doc = get_meta_doc(self.meta, schema)
+ assert "**label1:** (string)" in meta_doc
+ assert "**label2:** (string" in meta_doc
+ assert "**prop_no_label:** (string)" in meta_doc
+ assert "Each item in **array_label** list" in meta_doc
+
+ assert "prop1" not in meta_doc
+ assert ".*" not in meta_doc
+
+
+class AnnotatedCloudconfigFileTest(CiTestCase):
+ maxDiff = None
+
+ def test_annotated_cloudconfig_file_no_schema_errors(self):
+ """With no schema_errors, print the original content."""
+ content = b"ntp:\n pools: [ntp1.pools.com]\n"
+ self.assertEqual(
+ content, annotated_cloudconfig_file({}, content, schema_errors=[])
+ )
+
+ def test_annotated_cloudconfig_file_with_non_dict_cloud_config(self):
+ """Error when empty non-dict cloud-config is provided.
+
+ Our JSON validation, when user-data is None, generates a bunch of
+ schema validation errors of the format:
+ ('', "None is not of type 'object'"). Ignore those symptoms and
+ report the general problem instead.
+ """
+ content = b"\n\n\n"
+ expected = "\n".join(
+ [
+ content.decode(),
+ "# Errors: -------------",
+ "# E1: Cloud-config is not a YAML dict.\n\n",
+ ]
+ )
+ self.assertEqual(
+ expected,
+ annotated_cloudconfig_file(
+ None,
+ content,
+ schema_errors=[("", "None is not of type 'object'")],
+ ),
+ )
+
+ def test_annotated_cloudconfig_file_schema_annotates_and_adds_footer(self):
+ """With schema_errors, error lines are annotated and a footer added."""
+ content = dedent(
+ """\
+ #cloud-config
+ # comment
+ ntp:
+ pools: [-99, 75]
+ """
+ ).encode()
+ expected = dedent(
+ """\
+ #cloud-config
+ # comment
+ ntp: # E1
+ pools: [-99, 75] # E2,E3
+
+ # Errors: -------------
+ # E1: Some type error
+ # E2: -99 is not a string
+ # E3: 75 is not a string
+
+ """
+ )
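+ # content[13:] is assumed to strip the leading "#cloud-config" header
+ # before YAML parsing.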
+ parsed_config = safe_load(content[13:])
+ schema_errors = [
+ ("ntp", "Some type error"),
+ ("ntp.pools.0", "-99 is not a string"),
+ ("ntp.pools.1", "75 is not a string"),
+ ]
+ self.assertEqual(
+ expected,
+ annotated_cloudconfig_file(parsed_config, content, schema_errors),
+ )
+
+ def test_annotated_cloudconfig_file_annotates_separate_line_items(self):
+ """Errors are annotated for lists with items on separate lines."""
+ content = dedent(
+ """\
+ #cloud-config
+ # comment
+ ntp:
+ pools:
+ - -99
+ - 75
+ """
+ ).encode()
+ expected = dedent(
+ """\
+ ntp:
+ pools:
+ - -99 # E1
+ - 75 # E2
+ """
+ )
+ parsed_config = safe_load(content[13:])
+ schema_errors = [
+ ("ntp.pools.0", "-99 is not a string"),
+ ("ntp.pools.1", "75 is not a string"),
+ ]
+ self.assertIn(
+ expected,
+ annotated_cloudconfig_file(parsed_config, content, schema_errors),
+ )
+
+
+class TestMain:
+
+ exclusive_combinations = itertools.combinations(
+ ["--system", "--docs all", "--config-file something"], 2
+ )
+
+ @pytest.mark.parametrize("params", exclusive_combinations)
+ def test_main_exclusive_args(self, params, capsys):
+ """Main exits non-zero and error on required exclusive args."""
+ params = list(itertools.chain(*[a.split() for a in params]))
+ with mock.patch("sys.argv", ["mycmd"] + params):
+ with pytest.raises(SystemExit) as context_manager:
+ main()
+ assert 1 == context_manager.value.code
+
+ _out, err = capsys.readouterr()
+ expected = (
+ "Error:\n"
+ "Expected one of --config-file, --system or --docs arguments\n"
+ )
+ assert expected == err
+
+ def test_main_missing_args(self, capsys):
+ """Main exits non-zero and reports an error on missing parameters."""
+ with mock.patch("sys.argv", ["mycmd"]):
+ with pytest.raises(SystemExit) as context_manager:
+ main()
+ assert 1 == context_manager.value.code
+
+ _out, err = capsys.readouterr()
+ expected = (
+ "Error:\n"
+ "Expected one of --config-file, --system or --docs arguments\n"
+ )
+ assert expected == err
+
+ def test_main_absent_config_file(self, capsys):
+ """Main exits non-zero when config file is absent."""
+ myargs = ["mycmd", "--annotate", "--config-file", "NOT_A_FILE"]
+ with mock.patch("sys.argv", myargs):
+ with pytest.raises(SystemExit) as context_manager:
+ main()
+ assert 1 == context_manager.value.code
+ _out, err = capsys.readouterr()
+ assert "Error:\nConfigfile NOT_A_FILE does not exist\n" == err
+
+ def test_main_invalid_flag_combo(self, capsys):
+ """Main exits non-zero when invalid flag combo used."""
+ myargs = ["mycmd", "--annotate", "--docs", "DOES_NOT_MATTER"]
+ with mock.patch("sys.argv", myargs):
+ with pytest.raises(SystemExit) as context_manager:
+ main()
+ assert 1 == context_manager.value.code
+ _, err = capsys.readouterr()
+ assert (
+ "Error:\nInvalid flag combination. "
+ "Cannot use --annotate with --docs\n" == err
+ )
+
+ def test_main_prints_docs(self, capsys):
+ """When --docs parameter is provided, main generates documentation."""
+ myargs = ["mycmd", "--docs", "all"]
+ with mock.patch("sys.argv", myargs):
+ assert 0 == main(), "Expected 0 exit code"
+ out, _err = capsys.readouterr()
+ assert "\nNTP\n---\n" in out
+ assert "\nRuncmd\n------\n" in out
+
+ def test_main_validates_config_file(self, tmpdir, capsys):
+ """When --config-file parameter is provided, main validates schema."""
+ myyaml = tmpdir.join("my.yaml")
+ myargs = ["mycmd", "--config-file", myyaml.strpath]
+ myyaml.write(b"#cloud-config\nntp:") # shortest ntp schema
+ with mock.patch("sys.argv", myargs):
+ assert 0 == main(), "Expected 0 exit code"
+ out, _err = capsys.readouterr()
+ assert "Valid cloud-config: {0}\n".format(myyaml) == out
+
+ @mock.patch("cloudinit.config.schema.read_cfg_paths")
+ @mock.patch("cloudinit.config.schema.os.getuid", return_value=0)
+ def test_main_validates_system_userdata(
+ self, m_getuid, m_read_cfg_paths, capsys, paths
+ ):
+ """When --system is provided, main validates system userdata."""
+ m_read_cfg_paths.return_value = paths
+ ud_file = paths.get_ipath_cur("userdata_raw")
+ write_file(ud_file, b"#cloud-config\nntp:")
+ myargs = ["mycmd", "--system"]
+ with mock.patch("sys.argv", myargs):
+ assert 0 == main(), "Expected 0 exit code"
+ out, _err = capsys.readouterr()
+ assert "Valid cloud-config: system userdata\n" == out
+
+ @mock.patch("cloudinit.config.schema.os.getuid", return_value=1000)
+ def test_main_system_userdata_requires_root(self, m_getuid, capsys, paths):
+ """Non-root user can't use --system param"""
+ myargs = ["mycmd", "--system"]
+ with mock.patch("sys.argv", myargs):
+ with pytest.raises(SystemExit) as context_manager:
+ main()
+ assert 1 == context_manager.value.code
+ _out, err = capsys.readouterr()
+ expected = (
+ "Error:\nUnable to read system userdata as non-root user. "
+ "Try using sudo\n"
+ )
+ assert expected == err
+
+
+def _get_meta_doc_examples():
+ examples_dir = Path(cloud_init_project_dir("doc/examples"))
+ assert examples_dir.is_dir()
+
+ return (
+ str(f)
+ for f in examples_dir.glob("cloud-config*.txt")
+ if not f.name.startswith("cloud-config-archive")
+ )
+
+
+class TestSchemaDocExamples:
+ schema = get_schema()
+
+ @pytest.mark.parametrize("example_path", _get_meta_doc_examples())
+ @skipUnlessJsonSchema()
+ def test_schema_doc_examples(self, example_path):
+ validate_cloudconfig_file(example_path, self.schema)
+
+
+class TestStrictMetaschema:
+ """Validate that schemas follow a stricter metaschema definition than
+ the default. This disallows arbitrary key/value pairs.
+ """
+
+ @skipUnlessJsonSchema()
+ def test_modules(self):
+ """Validate all modules with a stricter metaschema"""
+ (validator, _) = get_jsonschema_validator()
+ for (name, value) in get_schemas().items():
+ if value:
+ validate_cloudconfig_metaschema(validator, value)
+ else:
+ logging.warning("module %s has no schema definition", name)
+
+ @skipUnlessJsonSchema()
+ def test_validate_bad_module(self):
+ """Throw exception by default, don't throw if throw=False
+
+ 'item' should be 'items' and is therefore interpreted as an
+ additional property, which is invalid with a strict metaschema.
+ """
+ (validator, _) = get_jsonschema_validator()
+ schema = {
+ "type": "array",
+ "item": {
+ "type": "object",
+ },
+ }
+ with pytest.raises(
+ SchemaValidationError,
+ match=r"Additional properties are not allowed.*",
+ ):
+
+ validate_cloudconfig_metaschema(validator, schema)
+
+ validate_cloudconfig_metaschema(validator, schema, throw=False)
+
+
+# vi: ts=4 expandtab syntax=python
diff --git a/tests/unittests/test_distros/__init__.py b/tests/unittests/distros/__init__.py
index 5394aa56..e66b9446 100644
--- a/tests/unittests/test_distros/__init__.py
+++ b/tests/unittests/distros/__init__.py
@@ -1,9 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
import copy
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import settings
+from cloudinit import distros, helpers, settings
def _get_distro(dtype, system_info=None):
@@ -14,8 +12,8 @@ def _get_distro(dtype, system_info=None):
example: _get_distro("debian")
"""
if system_info is None:
- system_info = copy.deepcopy(settings.CFG_BUILTIN['system_info'])
- system_info['distro'] = dtype
- paths = helpers.Paths(system_info['paths'])
+ system_info = copy.deepcopy(settings.CFG_BUILTIN["system_info"])
+ system_info["distro"] = dtype
+ paths = helpers.Paths(system_info["paths"])
distro_cls = distros.fetch(dtype)
return distro_cls(dtype, system_info, paths)
diff --git a/tests/unittests/distros/test_arch.py b/tests/unittests/distros/test_arch.py
new file mode 100644
index 00000000..5446295e
--- /dev/null
+++ b/tests/unittests/distros/test_arch.py
@@ -0,0 +1,55 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import util
+from cloudinit.distros.arch import _render_network
+from tests.unittests.helpers import CiTestCase, dir2dict
+
+from . import _get_distro
+
+
+class TestArch(CiTestCase):
+ def test_get_distro(self):
+ distro = _get_distro("arch")
+ hostname = "myhostname"
+ hostfile = self.tmp_path("hostfile")
+ distro._write_hostname(hostname, hostfile)
+ self.assertEqual(hostname + "\n", util.load_file(hostfile))
+
+
+class TestRenderNetwork(CiTestCase):
+ def test_basic_static(self):
+ """Just the most basic static config.
+
+ Note that 'lo' should not be rendered as an interface."""
+ entries = {
+ "eth0": {
+ "auto": True,
+ "dns-nameservers": ["8.8.8.8"],
+ "bootproto": "static",
+ "address": "10.0.0.2",
+ "gateway": "10.0.0.1",
+ "netmask": "255.255.255.0",
+ },
+ "lo": {"auto": True},
+ }
+ target = self.tmp_dir()
+ devs = _render_network(entries, target=target)
+ files = dir2dict(target, prefix=target)
+ self.assertEqual(["eth0"], devs)
+ self.assertEqual(
+ {
+ "/etc/netctl/eth0": "\n".join(
+ [
+ "Address=10.0.0.2/255.255.255.0",
+ "Connection=ethernet",
+ "DNS=('8.8.8.8')",
+ "Gateway=10.0.0.1",
+ "IP=static",
+ "Interface=eth0",
+ "",
+ ]
+ ),
+ "/etc/resolv.conf": "nameserver 8.8.8.8\n",
+ },
+ files,
+ )
diff --git a/tests/unittests/distros/test_bsd_utils.py b/tests/unittests/distros/test_bsd_utils.py
new file mode 100644
index 00000000..d6f0aeed
--- /dev/null
+++ b/tests/unittests/distros/test_bsd_utils.py
@@ -0,0 +1,66 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import cloudinit.distros.bsd_utils as bsd_utils
+from tests.unittests.helpers import CiTestCase, ExitStack, mock
+
+RC_FILE = """
+if something; then
+ do something here
+fi
+hostname={hostname}
+"""
+
+
+class TestBsdUtils(CiTestCase):
+ def setUp(self):
+ super().setUp()
+ patches = ExitStack()
+ self.addCleanup(patches.close)
+
+ self.load_file = patches.enter_context(
+ mock.patch.object(bsd_utils.util, "load_file")
+ )
+
+ self.write_file = patches.enter_context(
+ mock.patch.object(bsd_utils.util, "write_file")
+ )
+
+ def test_get_rc_config_value(self):
+ self.load_file.return_value = "hostname=foo\n"
+ self.assertEqual(bsd_utils.get_rc_config_value("hostname"), "foo")
+ self.load_file.assert_called_with("/etc/rc.conf")
+
+ self.load_file.return_value = "hostname=foo"
+ self.assertEqual(bsd_utils.get_rc_config_value("hostname"), "foo")
+
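+ # Surrounding single or double quotes are assumed to be stripped by
+ # get_rc_config_value; mismatched quoting is returned verbatim.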
+ self.load_file.return_value = 'hostname="foo"'
+ self.assertEqual(bsd_utils.get_rc_config_value("hostname"), "foo")
+
+ self.load_file.return_value = "hostname='foo'"
+ self.assertEqual(bsd_utils.get_rc_config_value("hostname"), "foo")
+
+ self.load_file.return_value = "hostname='foo\""
+ self.assertEqual(bsd_utils.get_rc_config_value("hostname"), "'foo\"")
+
+ self.load_file.return_value = ""
+ self.assertEqual(bsd_utils.get_rc_config_value("hostname"), None)
+
+ self.load_file.return_value = RC_FILE.format(hostname="foo")
+ self.assertEqual(bsd_utils.get_rc_config_value("hostname"), "foo")
+
+ def test_set_rc_config_value_unchanged(self):
+ # bsd_utils.set_rc_config_value('hostname', 'foo')
+ # self.write_file.assert_called_with('/etc/rc.conf', 'hostname=foo\n')
+
+ self.load_file.return_value = RC_FILE.format(hostname="foo")
+ self.write_file.assert_not_called()
+
+ def test_set_rc_config_value(self):
+ bsd_utils.set_rc_config_value("hostname", "foo")
+ self.write_file.assert_called_with("/etc/rc.conf", "hostname=foo\n")
+
+ self.load_file.return_value = RC_FILE.format(hostname="foo")
+ bsd_utils.set_rc_config_value("hostname", "bar")
+ self.write_file.assert_called_with(
+ "/etc/rc.conf", RC_FILE.format(hostname="bar")
+ )
diff --git a/tests/unittests/distros/test_create_users.py b/tests/unittests/distros/test_create_users.py
new file mode 100644
index 00000000..ddb039bd
--- /dev/null
+++ b/tests/unittests/distros/test_create_users.py
@@ -0,0 +1,282 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import re
+
+from cloudinit import distros, ssh_util
+from tests.unittests.helpers import CiTestCase, mock
+from tests.unittests.util import abstract_to_concrete
+
+
+@mock.patch("cloudinit.distros.util.system_is_snappy", return_value=False)
+@mock.patch("cloudinit.distros.subp.subp")
+class TestCreateUser(CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestCreateUser, self).setUp()
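+ # abstract_to_concrete is assumed to return an instantiable subclass
+ # of the abstract Distro base so its user-creation helpers can be
+ # exercised directly.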
+ self.dist = abstract_to_concrete(distros.Distro)(
+ name="test", cfg=None, paths=None
+ )
+
+ def _useradd2call(self, args):
+ # return a mock call for the useradd command in args
+ # with expected 'logstring'.
+ args = ["useradd"] + args
+ logcmd = [a for a in args]
+ for i in range(len(args)):
+ if args[i] in ("--password",):
+ logcmd[i + 1] = "REDACTED"
+ return mock.call(args, logstring=logcmd)
+
+ def test_basic(self, m_subp, m_is_snappy):
+ user = "foouser"
+ self.dist.create_user(user)
+ self.assertEqual(
+ m_subp.call_args_list,
+ [
+ self._useradd2call([user, "-m"]),
+ mock.call(["passwd", "-l", user]),
+ ],
+ )
+
+ def test_no_home(self, m_subp, m_is_snappy):
+ user = "foouser"
+ self.dist.create_user(user, no_create_home=True)
+ self.assertEqual(
+ m_subp.call_args_list,
+ [
+ self._useradd2call([user, "-M"]),
+ mock.call(["passwd", "-l", user]),
+ ],
+ )
+
+ def test_system_user(self, m_subp, m_is_snappy):
+ # system user should have no home and get --system
+ user = "foouser"
+ self.dist.create_user(user, system=True)
+ self.assertEqual(
+ m_subp.call_args_list,
+ [
+ self._useradd2call([user, "--system", "-M"]),
+ mock.call(["passwd", "-l", user]),
+ ],
+ )
+
+ def test_explicit_no_home_false(self, m_subp, m_is_snappy):
+ user = "foouser"
+ self.dist.create_user(user, no_create_home=False)
+ self.assertEqual(
+ m_subp.call_args_list,
+ [
+ self._useradd2call([user, "-m"]),
+ mock.call(["passwd", "-l", user]),
+ ],
+ )
+
+ def test_unlocked(self, m_subp, m_is_snappy):
+ user = "foouser"
+ self.dist.create_user(user, lock_passwd=False)
+ self.assertEqual(
+ m_subp.call_args_list, [self._useradd2call([user, "-m"])]
+ )
+
+ def test_set_password(self, m_subp, m_is_snappy):
+ user = "foouser"
+ password = "passfoo"
+ self.dist.create_user(user, passwd=password)
+ self.assertEqual(
+ m_subp.call_args_list,
+ [
+ self._useradd2call([user, "--password", password, "-m"]),
+ mock.call(["passwd", "-l", user]),
+ ],
+ )
+
+ @mock.patch("cloudinit.distros.util.is_group")
+ def test_group_added(self, m_is_group, m_subp, m_is_snappy):
+ m_is_group.return_value = False
+ user = "foouser"
+ self.dist.create_user(user, groups=["group1"])
+ expected = [
+ mock.call(["groupadd", "group1"]),
+ self._useradd2call([user, "--groups", "group1", "-m"]),
+ mock.call(["passwd", "-l", user]),
+ ]
+ self.assertEqual(m_subp.call_args_list, expected)
+
+ @mock.patch("cloudinit.distros.util.is_group")
+ def test_only_new_group_added(self, m_is_group, m_subp, m_is_snappy):
+ ex_groups = ["existing_group"]
+ groups = ["group1", ex_groups[0]]
+ m_is_group.side_effect = lambda m: m in ex_groups
+ user = "foouser"
+ self.dist.create_user(user, groups=groups)
+ expected = [
+ mock.call(["groupadd", "group1"]),
+ self._useradd2call([user, "--groups", ",".join(groups), "-m"]),
+ mock.call(["passwd", "-l", user]),
+ ]
+ self.assertEqual(m_subp.call_args_list, expected)
+
+ @mock.patch("cloudinit.distros.util.is_group")
+ def test_create_groups_with_whitespace_string(
+ self, m_is_group, m_subp, m_is_snappy
+ ):
+ # groups supported as a comma-delimited string even with whitespace
+ m_is_group.return_value = False
+ user = "foouser"
+ self.dist.create_user(user, groups="group1, group2")
+ expected = [
+ mock.call(["groupadd", "group1"]),
+ mock.call(["groupadd", "group2"]),
+ self._useradd2call([user, "--groups", "group1,group2", "-m"]),
+ mock.call(["passwd", "-l", user]),
+ ]
+ self.assertEqual(m_subp.call_args_list, expected)
+
+ def test_explicit_sudo_false(self, m_subp, m_is_snappy):
+ user = "foouser"
+ self.dist.create_user(user, sudo=False)
+ self.assertEqual(
+ m_subp.call_args_list,
+ [
+ self._useradd2call([user, "-m"]),
+ mock.call(["passwd", "-l", user]),
+ ],
+ )
+
+ @mock.patch("cloudinit.ssh_util.setup_user_keys")
+ def test_setup_ssh_authorized_keys_with_string(
+ self, m_setup_user_keys, m_subp, m_is_snappy
+ ):
+ """ssh_authorized_keys allows string and calls setup_user_keys."""
+ user = "foouser"
+ self.dist.create_user(user, ssh_authorized_keys="mykey")
+ self.assertEqual(
+ m_subp.call_args_list,
+ [
+ self._useradd2call([user, "-m"]),
+ mock.call(["passwd", "-l", user]),
+ ],
+ )
+ m_setup_user_keys.assert_called_once_with(set(["mykey"]), user)
+
+ @mock.patch("cloudinit.ssh_util.setup_user_keys")
+ def test_setup_ssh_authorized_keys_with_list(
+ self, m_setup_user_keys, m_subp, m_is_snappy
+ ):
+ """ssh_authorized_keys allows lists and calls setup_user_keys."""
+ user = "foouser"
+ self.dist.create_user(user, ssh_authorized_keys=["key1", "key2"])
+ self.assertEqual(
+ m_subp.call_args_list,
+ [
+ self._useradd2call([user, "-m"]),
+ mock.call(["passwd", "-l", user]),
+ ],
+ )
+ m_setup_user_keys.assert_called_once_with(set(["key1", "key2"]), user)
+
+ @mock.patch("cloudinit.ssh_util.setup_user_keys")
+ def test_setup_ssh_authorized_keys_with_integer(
+ self, m_setup_user_keys, m_subp, m_is_snappy
+ ):
+ """ssh_authorized_keys warns on non-iterable/string type."""
+ user = "foouser"
+ self.dist.create_user(user, ssh_authorized_keys=-1)
+ m_setup_user_keys.assert_called_once_with(set([]), user)
+ match = re.match(
+ r".*WARNING: Invalid type \'<(type|class) \'int\'>\' detected for"
+ " 'ssh_authorized_keys'.*",
+ self.logs.getvalue(),
+ re.DOTALL,
+ )
+ self.assertIsNotNone(
+ match, "Missing ssh_authorized_keys invalid type warning"
+ )
+
+ @mock.patch("cloudinit.ssh_util.setup_user_keys")
+ def test_create_user_with_ssh_redirect_user_no_cloud_keys(
+ self, m_setup_user_keys, m_subp, m_is_snappy
+ ):
+ """Log a warning when redirecting a user with no cloud ssh keys."""
+ user = "foouser"
+ self.dist.create_user(user, ssh_redirect_user="someuser")
+ self.assertIn(
+ "WARNING: Unable to disable SSH logins for foouser given "
+ "ssh_redirect_user: someuser. No cloud public-keys present.\n",
+ self.logs.getvalue(),
+ )
+ m_setup_user_keys.assert_not_called()
+
+ @mock.patch("cloudinit.ssh_util.setup_user_keys")
+ def test_create_user_with_ssh_redirect_user_with_cloud_keys(
+ self, m_setup_user_keys, m_subp, m_is_snappy
+ ):
+ """Disable ssh when ssh_redirect_user and cloud ssh keys are set."""
+ user = "foouser"
+ self.dist.create_user(
+ user, ssh_redirect_user="someuser", cloud_public_ssh_keys=["key1"]
+ )
+ disable_prefix = ssh_util.DISABLE_USER_OPTS
+ disable_prefix = disable_prefix.replace("$USER", "someuser")
+ disable_prefix = disable_prefix.replace("$DISABLE_USER", user)
+ m_setup_user_keys.assert_called_once_with(
+ set(["key1"]), "foouser", options=disable_prefix
+ )
+
+ @mock.patch("cloudinit.ssh_util.setup_user_keys")
+ def test_create_user_with_ssh_redirect_user_does_not_disable_auth_keys(
+ self, m_setup_user_keys, m_subp, m_is_snappy
+ ):
+ """Do not disable ssh_authorized_keys when ssh_redirect_user is set."""
+ user = "foouser"
+ self.dist.create_user(
+ user,
+ ssh_authorized_keys="auth1",
+ ssh_redirect_user="someuser",
+ cloud_public_ssh_keys=["key1"],
+ )
+ disable_prefix = ssh_util.DISABLE_USER_OPTS
+ disable_prefix = disable_prefix.replace("$USER", "someuser")
+ disable_prefix = disable_prefix.replace("$DISABLE_USER", user)
+ self.assertEqual(
+ m_setup_user_keys.call_args_list,
+ [
+ mock.call(set(["auth1"]), user), # not disabled
+ mock.call(set(["key1"]), "foouser", options=disable_prefix),
+ ],
+ )
+
+ @mock.patch("cloudinit.distros.subp.which")
+ def test_lock_with_usermod_if_no_passwd(
+ self, m_which, m_subp, m_is_snappy
+ ):
+ """Lock uses usermod --lock if no 'passwd' cmd available."""
+ m_which.side_effect = lambda m: m in ("usermod",)
+ self.dist.lock_passwd("bob")
+ self.assertEqual(
+ [mock.call(["usermod", "--lock", "bob"])], m_subp.call_args_list
+ )
+
+ @mock.patch("cloudinit.distros.subp.which")
+ def test_lock_with_passwd_if_available(self, m_which, m_subp, m_is_snappy):
+ """Lock with only passwd will use passwd."""
+ m_which.side_effect = lambda m: m in ("passwd",)
+ self.dist.lock_passwd("bob")
+ self.assertEqual(
+ [mock.call(["passwd", "-l", "bob"])], m_subp.call_args_list
+ )
+
+ @mock.patch("cloudinit.distros.subp.which")
+ def test_lock_raises_runtime_if_no_commands(
+ self, m_which, m_subp, m_is_snappy
+ ):
+ """Lock with no commands available raises RuntimeError."""
+ m_which.return_value = None
+ with self.assertRaises(RuntimeError):
+ self.dist.lock_passwd("bob")
+
+
+# vi: ts=4 expandtab
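The _useradd2call helper above encodes one behaviour worth calling out: the real argv passed to subp keeps the password, while the logged form redacts it. A standalone sketch of that redaction idea, using illustrative names rather than cloud-init's own code:

def redact_password_for_log(args):
    # Keep the real command intact, but replace the value following
    # "--password" in the copy that would be written to the log.
    logged = list(args)
    for i, arg in enumerate(args):
        if arg == "--password" and i + 1 < len(logged):
            logged[i + 1] = "REDACTED"
    return args, logged

argv, logstring = redact_password_for_log(
    ["useradd", "foouser", "--password", "passfoo", "-m"]
)
assert "passfoo" in argv and "passfoo" not in logstring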
diff --git a/tests/unittests/distros/test_debian.py b/tests/unittests/distros/test_debian.py
new file mode 100644
index 00000000..c7c5932e
--- /dev/null
+++ b/tests/unittests/distros/test_debian.py
@@ -0,0 +1,211 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+from itertools import count, cycle
+from unittest import mock
+
+import pytest
+
+from cloudinit import distros, subp, util
+from cloudinit.distros.debian import APT_GET_COMMAND, APT_GET_WRAPPER
+from tests.unittests.helpers import FilesystemMockingTestCase
+
+
+@mock.patch("cloudinit.distros.debian.subp.subp")
+class TestDebianApplyLocale(FilesystemMockingTestCase):
+ def setUp(self):
+ super(TestDebianApplyLocale, self).setUp()
+ self.new_root = self.tmp_dir()
+ self.patchOS(self.new_root)
+ self.patchUtils(self.new_root)
+ self.spath = self.tmp_path("etc/default/locale", self.new_root)
+ cls = distros.fetch("debian")
+ self.distro = cls("debian", {}, None)
+
+ def test_no_rerun(self, m_subp):
+ """If system has defined locale, no re-run is expected."""
+ m_subp.return_value = (None, None)
+ locale = "en_US.UTF-8"
+ util.write_file(self.spath, "LANG=%s\n" % locale, omode="w")
+ self.distro.apply_locale(locale, out_fn=self.spath)
+ m_subp.assert_not_called()
+
+ def test_no_regen_on_c_utf8(self, m_subp):
+ """If locale is set to C.UTF-8, do not attempt to call locale-gen."""
+ m_subp.return_value = (None, None)
+ locale = "C.UTF-8"
+ util.write_file(self.spath, "LANG=%s\n" % "en_US.UTF-8", omode="w")
+ self.distro.apply_locale(locale, out_fn=self.spath)
+ self.assertEqual(
+ [
+ [
+ "update-locale",
+ "--locale-file=" + self.spath,
+ "LANG=%s" % locale,
+ ]
+ ],
+ [p[0][0] for p in m_subp.call_args_list],
+ )
+
+ def test_rerun_if_different(self, m_subp):
+ """If system has different locale, locale-gen should be called."""
+ m_subp.return_value = (None, None)
+ locale = "en_US.UTF-8"
+ util.write_file(self.spath, "LANG=fr_FR.UTF-8", omode="w")
+ self.distro.apply_locale(locale, out_fn=self.spath)
+ self.assertEqual(
+ [
+ ["locale-gen", locale],
+ [
+ "update-locale",
+ "--locale-file=" + self.spath,
+ "LANG=%s" % locale,
+ ],
+ ],
+ [p[0][0] for p in m_subp.call_args_list],
+ )
+
+ def test_rerun_if_no_file(self, m_subp):
+ """If system has no locale file, locale-gen should be called."""
+ m_subp.return_value = (None, None)
+ locale = "en_US.UTF-8"
+ self.distro.apply_locale(locale, out_fn=self.spath)
+ self.assertEqual(
+ [
+ ["locale-gen", locale],
+ [
+ "update-locale",
+ "--locale-file=" + self.spath,
+ "LANG=%s" % locale,
+ ],
+ ],
+ [p[0][0] for p in m_subp.call_args_list],
+ )
+
+ def test_rerun_on_unset_system_locale(self, m_subp):
+ """If system has unset locale, locale-gen should be called."""
+ m_subp.return_value = (None, None)
+ locale = "en_US.UTF-8"
+ util.write_file(self.spath, "LANG=", omode="w")
+ self.distro.apply_locale(locale, out_fn=self.spath)
+ self.assertEqual(
+ [
+ ["locale-gen", locale],
+ [
+ "update-locale",
+ "--locale-file=" + self.spath,
+ "LANG=%s" % locale,
+ ],
+ ],
+ [p[0][0] for p in m_subp.call_args_list],
+ )
+
+ def test_rerun_on_mismatched_keys(self, m_subp):
+ """If key is LC_ALL and system has only LANG, rerun is expected."""
+ m_subp.return_value = (None, None)
+ locale = "en_US.UTF-8"
+ util.write_file(self.spath, "LANG=", omode="w")
+ self.distro.apply_locale(locale, out_fn=self.spath, keyname="LC_ALL")
+ self.assertEqual(
+ [
+ ["locale-gen", locale],
+ [
+ "update-locale",
+ "--locale-file=" + self.spath,
+ "LC_ALL=%s" % locale,
+ ],
+ ],
+ [p[0][0] for p in m_subp.call_args_list],
+ )
+
+ def test_falseish_locale_raises_valueerror(self, m_subp):
+ """locale as None or "" is invalid and should raise ValueError."""
+
+ with self.assertRaises(ValueError) as ctext_m:
+ self.distro.apply_locale(None)
+ m_subp.assert_not_called()
+
+ self.assertEqual(
+ "Failed to provide locale value.", str(ctext_m.exception)
+ )
+
+ with self.assertRaises(ValueError) as ctext_m:
+ self.distro.apply_locale("")
+ m_subp.assert_not_called()
+ self.assertEqual(
+ "Failed to provide locale value.", str(ctext_m.exception)
+ )
+
+
+@mock.patch.dict("os.environ", {}, clear=True)
+@mock.patch("cloudinit.distros.debian.subp.which", return_value=True)
+@mock.patch("cloudinit.distros.debian.subp.subp")
+class TestPackageCommand:
+ distro = distros.fetch("debian")("debian", {}, None)
+
+ @mock.patch(
+ "cloudinit.distros.debian.Distro._apt_lock_available",
+ return_value=True,
+ )
+ def test_simple_command(self, m_apt_avail, m_subp, m_which):
+ self.distro.package_command("update")
+ apt_args = [APT_GET_WRAPPER["command"]]
+ apt_args.extend(APT_GET_COMMAND)
+ apt_args.append("update")
+ expected_call = {
+ "args": apt_args,
+ "capture": False,
+ "env": {"DEBIAN_FRONTEND": "noninteractive"},
+ }
+ assert m_subp.call_args == mock.call(**expected_call)
+
+ @mock.patch(
+ "cloudinit.distros.debian.Distro._apt_lock_available",
+ side_effect=[False, False, True],
+ )
+ @mock.patch("cloudinit.distros.debian.time.sleep")
+ def test_wait_for_lock(self, m_sleep, m_apt_avail, m_subp, m_which):
+ self.distro._wait_for_apt_command("stub", {"args": "stub2"})
+ assert m_sleep.call_args_list == [mock.call(1), mock.call(1)]
+ assert m_subp.call_args_list == [mock.call(args="stub2")]
+
+ @mock.patch(
+ "cloudinit.distros.debian.Distro._apt_lock_available",
+ return_value=False,
+ )
+ @mock.patch("cloudinit.distros.debian.time.sleep")
+ @mock.patch("cloudinit.distros.debian.time.time", side_effect=count())
+ def test_lock_wait_timeout(
+ self, m_time, m_sleep, m_apt_avail, m_subp, m_which
+ ):
+ with pytest.raises(TimeoutError):
+ self.distro._wait_for_apt_command("stub", "stub2", timeout=5)
+ assert m_subp.call_args_list == []
+
+ @mock.patch(
+ "cloudinit.distros.debian.Distro._apt_lock_available",
+ side_effect=cycle([True, False]),
+ )
+ @mock.patch("cloudinit.distros.debian.time.sleep")
+ def test_lock_exception_wait(self, m_sleep, m_apt_avail, m_subp, m_which):
+ exception = subp.ProcessExecutionError(
+ exit_code=100, stderr="Could not get apt lock"
+ )
+ m_subp.side_effect = [exception, exception, "return_thing"]
+ ret = self.distro._wait_for_apt_command("stub", {"args": "stub2"})
+ assert ret == "return_thing"
+
+ @mock.patch(
+ "cloudinit.distros.debian.Distro._apt_lock_available",
+ side_effect=cycle([True, False]),
+ )
+ @mock.patch("cloudinit.distros.debian.time.sleep")
+ @mock.patch("cloudinit.distros.debian.time.time", side_effect=count())
+ def test_lock_exception_timeout(
+ self, m_time, m_sleep, m_apt_avail, m_subp, m_which
+ ):
+ m_subp.side_effect = subp.ProcessExecutionError(
+ exit_code=100, stderr="Could not get apt lock"
+ )
+ with pytest.raises(TimeoutError):
+ self.distro._wait_for_apt_command(
+ "stub", {"args": "stub2"}, timeout=5
+ )
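TestPackageCommand above relies on three properties of the apt lock handling: polling until the lock is free, retrying lock-related command failures, and raising TimeoutError once the deadline passes. A minimal sketch of such a wait loop follows, with made-up names and a RuntimeError standing in for the lock-contention error; it is not the Debian distro implementation.

import time

def wait_for_apt(run_cmd, lock_available, timeout=30.0, poll=1.0):
    deadline = time.time() + timeout
    while time.time() < deadline:
        if not lock_available():
            time.sleep(poll)       # lock held elsewhere: wait and re-check
            continue
        try:
            return run_cmd()
        except RuntimeError:       # stand-in for "could not get apt lock"
            time.sleep(poll)
    raise TimeoutError("apt lock not acquired within %.0fs" % timeout)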
diff --git a/tests/unittests/distros/test_dragonflybsd.py b/tests/unittests/distros/test_dragonflybsd.py
new file mode 100644
index 00000000..f0cd1b24
--- /dev/null
+++ b/tests/unittests/distros/test_dragonflybsd.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python3
+
+
+import cloudinit.util
+from tests.unittests.helpers import mock
+
+
+def test_find_dragonflybsd_part():
+ assert cloudinit.util.find_dragonflybsd_part("/dev/vbd0s3") == "vbd0s3"
+
+
+@mock.patch("cloudinit.util.is_DragonFlyBSD")
+@mock.patch("cloudinit.subp.subp")
+def test_parse_mount(mock_subp, m_is_DragonFlyBSD):
+ mount_out = """
+vbd0s3 on / (hammer2, local)
+devfs on /dev (devfs, nosymfollow, local)
+/dev/vbd0s0a on /boot (ufs, local)
+procfs on /proc (procfs, local)
+tmpfs on /var/run/shm (tmpfs, local)
+"""
+
+ mock_subp.return_value = (mount_out, "")
+ m_is_DragonFlyBSD.return_value = True
+ assert cloudinit.util.parse_mount("/") == ("vbd0s3", "hammer2", "/")
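For reference, the mount-output parsing exercised by test_parse_mount can be approximated in a few lines. This is a simplified, hand-rolled illustration of the expected (device, fstype, mount point) result, not cloudinit.util.parse_mount itself.

def parse_bsd_mount(mount_out, mount_point):
    # Lines look like: "vbd0s3 on / (hammer2, local)"
    for line in mount_out.splitlines():
        parts = line.split()
        if len(parts) >= 4 and parts[1] == "on" and parts[2] == mount_point:
            dev = parts[0].replace("/dev/", "")
            fs_type = parts[3].strip("(,")
            return dev, fs_type, mount_point
    return None

sample = "vbd0s3 on / (hammer2, local)\n/dev/vbd0s0a on /boot (ufs, local)\n"
assert parse_bsd_mount(sample, "/") == ("vbd0s3", "hammer2", "/")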
diff --git a/tests/unittests/test_distros/test_freebsd.py b/tests/unittests/distros/test_freebsd.py
index be565b04..22be5098 100644
--- a/tests/unittests/test_distros/test_freebsd.py
+++ b/tests/unittests/distros/test_freebsd.py
@@ -1,45 +1,43 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.util import (find_freebsd_part, get_path_dev_freebsd)
-from cloudinit.tests.helpers import (CiTestCase, mock)
-
import os
+from cloudinit.util import find_freebsd_part, get_path_dev_freebsd
+from tests.unittests.helpers import CiTestCase, mock
-class TestDeviceLookUp(CiTestCase):
- @mock.patch('cloudinit.subp.subp')
+class TestDeviceLookUp(CiTestCase):
+ @mock.patch("cloudinit.subp.subp")
def test_find_freebsd_part_label(self, mock_subp):
- glabel_out = '''
+ glabel_out = """
gptid/fa52d426-c337-11e6-8911-00155d4c5e47 N/A da0p1
label/rootfs N/A da0p2
label/swap N/A da0p3
-'''
+"""
mock_subp.return_value = (glabel_out, "")
res = find_freebsd_part("/dev/label/rootfs")
self.assertEqual("da0p2", res)
- @mock.patch('cloudinit.subp.subp')
+ @mock.patch("cloudinit.subp.subp")
def test_find_freebsd_part_gpt(self, mock_subp):
- glabel_out = '''
+ glabel_out = """
gpt/bootfs N/A vtbd0p1
gptid/3f4cbe26-75da-11e8-a8f2-002590ec6166 N/A vtbd0p1
gpt/swapfs N/A vtbd0p2
gpt/rootfs N/A vtbd0p3
iso9660/cidata N/A vtbd2
-'''
+"""
mock_subp.return_value = (glabel_out, "")
res = find_freebsd_part("/dev/gpt/rootfs")
self.assertEqual("vtbd0p3", res)
def test_get_path_dev_freebsd_label(self):
- mnt_list = '''
+ mnt_list = """
/dev/label/rootfs / ufs rw 1 1
devfs /dev devfs rw,multilabel 0 0
fdescfs /dev/fd fdescfs rw 0 0
/dev/da1s1 /mnt/resource ufs rw 2 2
-'''
- with mock.patch.object(os.path, 'exists',
- return_value=True):
- res = get_path_dev_freebsd('/etc', mnt_list)
+"""
+ with mock.patch.object(os.path, "exists", return_value=True):
+ res = get_path_dev_freebsd("/etc", mnt_list)
self.assertIsNotNone(res)
diff --git a/tests/unittests/distros/test_generic.py b/tests/unittests/distros/test_generic.py
new file mode 100644
index 00000000..93c5395c
--- /dev/null
+++ b/tests/unittests/distros/test_generic.py
@@ -0,0 +1,383 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import os
+import shutil
+import tempfile
+from unittest import mock
+
+import pytest
+
+from cloudinit import distros, util
+from tests.unittests import helpers
+
+unknown_arch_info = {
+ "arches": ["default"],
+ "failsafe": {
+ "primary": "http://fs-primary-default",
+ "security": "http://fs-security-default",
+ },
+}
+
+package_mirrors = [
+ {
+ "arches": ["i386", "amd64"],
+ "failsafe": {
+ "primary": "http://fs-primary-intel",
+ "security": "http://fs-security-intel",
+ },
+ "search": {
+ "primary": [
+ "http://%(ec2_region)s.ec2/",
+ "http://%(availability_zone)s.clouds/",
+ ],
+ "security": [
+ "http://security-mirror1-intel",
+ "http://security-mirror2-intel",
+ ],
+ },
+ },
+ {
+ "arches": ["armhf", "armel"],
+ "failsafe": {
+ "primary": "http://fs-primary-arm",
+ "security": "http://fs-security-arm",
+ },
+ },
+ unknown_arch_info,
+]
+
+gpmi = distros._get_package_mirror_info
+gapmi = distros._get_arch_package_mirror_info
+
+
+class TestGenericDistro(helpers.FilesystemMockingTestCase):
+ def setUp(self):
+ super(TestGenericDistro, self).setUp()
+ # Make a temp directory for tests to use.
+ self.tmp = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, self.tmp)
+
+ def _write_load_sudoers(self, _user, rules):
+ cls = distros.fetch("ubuntu")
+ d = cls("ubuntu", {}, None)
+ os.makedirs(os.path.join(self.tmp, "etc"))
+ os.makedirs(os.path.join(self.tmp, "etc", "sudoers.d"))
+ self.patchOS(self.tmp)
+ self.patchUtils(self.tmp)
+ d.write_sudo_rules("harlowja", rules)
+ contents = util.load_file(d.ci_sudoers_fn)
+ return contents
+
+ def _count_in(self, lines_look_for, text_content):
+ found_amount = 0
+ for e in lines_look_for:
+ for line in text_content.splitlines():
+ line = line.strip()
+ if line == e:
+ found_amount += 1
+ return found_amount
+
+ def test_sudoers_ensure_rules(self):
+ rules = "ALL=(ALL:ALL) ALL"
+ contents = self._write_load_sudoers("harlowja", rules)
+ expected = ["harlowja ALL=(ALL:ALL) ALL"]
+ self.assertEqual(len(expected), self._count_in(expected, contents))
+ not_expected = [
+ "harlowja A",
+ "harlowja L",
+ "harlowja L",
+ ]
+ self.assertEqual(0, self._count_in(not_expected, contents))
+
+ def test_sudoers_ensure_rules_list(self):
+ rules = [
+ "ALL=(ALL:ALL) ALL",
+ "B-ALL=(ALL:ALL) ALL",
+ "C-ALL=(ALL:ALL) ALL",
+ ]
+ contents = self._write_load_sudoers("harlowja", rules)
+ expected = [
+ "harlowja ALL=(ALL:ALL) ALL",
+ "harlowja B-ALL=(ALL:ALL) ALL",
+ "harlowja C-ALL=(ALL:ALL) ALL",
+ ]
+ self.assertEqual(len(expected), self._count_in(expected, contents))
+ not_expected = [
+ "harlowja A",
+ "harlowja L",
+ "harlowja L",
+ ]
+ self.assertEqual(0, self._count_in(not_expected, contents))
+
+ def test_sudoers_ensure_new(self):
+ cls = distros.fetch("ubuntu")
+ d = cls("ubuntu", {}, None)
+ self.patchOS(self.tmp)
+ self.patchUtils(self.tmp)
+ d.ensure_sudo_dir("/b")
+ contents = util.load_file("/etc/sudoers")
+ self.assertIn("includedir /b", contents)
+ self.assertTrue(os.path.isdir("/b"))
+
+ def test_sudoers_ensure_append(self):
+ cls = distros.fetch("ubuntu")
+ d = cls("ubuntu", {}, None)
+ self.patchOS(self.tmp)
+ self.patchUtils(self.tmp)
+ util.write_file("/etc/sudoers", "josh, josh\n")
+ d.ensure_sudo_dir("/b")
+ contents = util.load_file("/etc/sudoers")
+ self.assertIn("includedir /b", contents)
+ self.assertTrue(os.path.isdir("/b"))
+ self.assertIn("josh", contents)
+ self.assertEqual(2, contents.count("josh"))
+
+ def test_sudoers_ensure_only_one_includedir(self):
+ cls = distros.fetch("ubuntu")
+ d = cls("ubuntu", {}, None)
+ self.patchOS(self.tmp)
+ self.patchUtils(self.tmp)
+ for char in ["#", "@"]:
+ util.write_file("/etc/sudoers", "{}includedir /b".format(char))
+ d.ensure_sudo_dir("/b")
+ contents = util.load_file("/etc/sudoers")
+ self.assertIn("includedir /b", contents)
+ self.assertTrue(os.path.isdir("/b"))
+ self.assertEqual(1, contents.count("includedir /b"))
+
+ def test_arch_package_mirror_info_unknown(self):
+ """For an unknown arch, we should get back the entry with arch 'default'."""
+ arch_mirrors = gapmi(package_mirrors, arch="unknown")
+ self.assertEqual(unknown_arch_info, arch_mirrors)
+
+ def test_arch_package_mirror_info_known(self):
+ arch_mirrors = gapmi(package_mirrors, arch="amd64")
+ self.assertEqual(package_mirrors[0], arch_mirrors)
+
+ def test_systemd_in_use(self):
+ cls = distros.fetch("ubuntu")
+ d = cls("ubuntu", {}, None)
+ self.patchOS(self.tmp)
+ self.patchUtils(self.tmp)
+ os.makedirs("/run/systemd/system")
+ self.assertTrue(d.uses_systemd())
+
+ def test_systemd_not_in_use(self):
+ cls = distros.fetch("ubuntu")
+ d = cls("ubuntu", {}, None)
+ self.patchOS(self.tmp)
+ self.patchUtils(self.tmp)
+ self.assertFalse(d.uses_systemd())
+
+ def test_systemd_symlink(self):
+ cls = distros.fetch("ubuntu")
+ d = cls("ubuntu", {}, None)
+ self.patchOS(self.tmp)
+ self.patchUtils(self.tmp)
+ os.makedirs("/run/systemd")
+ os.symlink("/", "/run/systemd/system")
+ self.assertFalse(d.uses_systemd())
+
+ @mock.patch("cloudinit.distros.debian.read_system_locale")
+ def test_get_locale_ubuntu(self, m_locale):
+ """Test ubuntu distro returns locale set to C.UTF-8"""
+ m_locale.return_value = "C.UTF-8"
+ cls = distros.fetch("ubuntu")
+ d = cls("ubuntu", {}, None)
+ locale = d.get_locale()
+ self.assertEqual("C.UTF-8", locale)
+
+ def test_get_locale_rhel(self):
+ """Test rhel distro returns NotImplementedError exception"""
+ cls = distros.fetch("rhel")
+ d = cls("rhel", {}, None)
+ with self.assertRaises(NotImplementedError):
+ d.get_locale()
+
+ def test_expire_passwd_uses_chpasswd(self):
+ """Test that expire_passwd uses the passwd command on ubuntu and rhel."""
+ for d_name in ("ubuntu", "rhel"):
+ cls = distros.fetch(d_name)
+ d = cls(d_name, {}, None)
+ with mock.patch("cloudinit.subp.subp") as m_subp:
+ d.expire_passwd("myuser")
+ m_subp.assert_called_once_with(["passwd", "--expire", "myuser"])
+
+ def test_expire_passwd_freebsd_uses_pw_command(self):
+ """Test FreeBSD.expire_passwd uses the pw command."""
+ cls = distros.fetch("freebsd")
+ d = cls("freebsd", {}, None)
+ with mock.patch("cloudinit.subp.subp") as m_subp:
+ d.expire_passwd("myuser")
+ m_subp.assert_called_once_with(
+ ["pw", "usermod", "myuser", "-p", "01-Jan-1970"]
+ )
+
+
+class TestGetPackageMirrors:
+ def return_first(self, mlist):
+ if not mlist:
+ return None
+ return mlist[0]
+
+ def return_second(self, mlist):
+ if not mlist:
+ return None
+
+ return mlist[1] if len(mlist) > 1 else None
+
+ def return_none(self, _mlist):
+ return None
+
+ def return_last(self, mlist):
+ if not mlist:
+ return None
+ return mlist[-1]
+
+ @pytest.mark.parametrize(
+ "allow_ec2_mirror, platform_type, mirrors",
+ [
+ (
+ True,
+ "ec2",
+ [
+ {
+ "primary": "http://us-east-1.ec2/",
+ "security": "http://security-mirror1-intel",
+ },
+ {
+ "primary": "http://us-east-1a.clouds/",
+ "security": "http://security-mirror2-intel",
+ },
+ ],
+ ),
+ (
+ True,
+ "other",
+ [
+ {
+ "primary": "http://us-east-1.ec2/",
+ "security": "http://security-mirror1-intel",
+ },
+ {
+ "primary": "http://us-east-1a.clouds/",
+ "security": "http://security-mirror2-intel",
+ },
+ ],
+ ),
+ (
+ False,
+ "ec2",
+ [
+ {
+ "primary": "http://us-east-1.ec2/",
+ "security": "http://security-mirror1-intel",
+ },
+ {
+ "primary": "http://us-east-1a.clouds/",
+ "security": "http://security-mirror2-intel",
+ },
+ ],
+ ),
+ (
+ False,
+ "other",
+ [
+ {
+ "primary": "http://us-east-1a.clouds/",
+ "security": "http://security-mirror1-intel",
+ },
+ {
+ "primary": "http://fs-primary-intel",
+ "security": "http://security-mirror2-intel",
+ },
+ ],
+ ),
+ ],
+ )
+ def test_get_package_mirror_info_az_ec2(
+ self, allow_ec2_mirror, platform_type, mirrors
+ ):
+ flag_path = (
+ "cloudinit.distros.ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES"
+ )
+ with mock.patch(flag_path, allow_ec2_mirror):
+ arch_mirrors = gapmi(package_mirrors, arch="amd64")
+ data_source_mock = mock.Mock(
+ availability_zone="us-east-1a", platform_type=platform_type
+ )
+
+ results = gpmi(
+ arch_mirrors,
+ data_source=data_source_mock,
+ mirror_filter=self.return_first,
+ )
+ assert results == mirrors[0]
+
+ results = gpmi(
+ arch_mirrors,
+ data_source=data_source_mock,
+ mirror_filter=self.return_second,
+ )
+ assert results == mirrors[1]
+
+ results = gpmi(
+ arch_mirrors,
+ data_source=data_source_mock,
+ mirror_filter=self.return_none,
+ )
+ assert results == package_mirrors[0]["failsafe"]
+
+ def test_get_package_mirror_info_az_non_ec2(self):
+ arch_mirrors = gapmi(package_mirrors, arch="amd64")
+ data_source_mock = mock.Mock(availability_zone="nova.cloudvendor")
+
+ results = gpmi(
+ arch_mirrors,
+ data_source=data_source_mock,
+ mirror_filter=self.return_first,
+ )
+ assert results == {
+ "primary": "http://nova.cloudvendor.clouds/",
+ "security": "http://security-mirror1-intel",
+ }
+
+ results = gpmi(
+ arch_mirrors,
+ data_source=data_source_mock,
+ mirror_filter=self.return_last,
+ )
+ assert results == {
+ "primary": "http://nova.cloudvendor.clouds/",
+ "security": "http://security-mirror2-intel",
+ }
+
+ def test_get_package_mirror_info_none(self):
+ arch_mirrors = gapmi(package_mirrors, arch="amd64")
+ data_source_mock = mock.Mock(availability_zone=None)
+
+ # Because both search entries here rely on replacement based on
+ # availability-zone, the filter will be called with an empty list and
+ # the failsafe should be used.
+ results = gpmi(
+ arch_mirrors,
+ data_source=data_source_mock,
+ mirror_filter=self.return_first,
+ )
+ assert results == {
+ "primary": "http://fs-primary-intel",
+ "security": "http://security-mirror1-intel",
+ }
+
+ results = gpmi(
+ arch_mirrors,
+ data_source=data_source_mock,
+ mirror_filter=self.return_last,
+ )
+ assert results == {
+ "primary": "http://fs-primary-intel",
+ "security": "http://security-mirror2-intel",
+ }
+
+
+# vi: ts=4 expandtab
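The arch lookup behind gapmi (the test_arch_package_mirror_info_* cases) boils down to: return the entry whose arches list contains the requested arch, otherwise fall back to the entry carrying "default". A sketch under that assumption, not the real cloudinit.distros helper:

def arch_mirror_info(mirrors, arch):
    fallback = None
    for info in mirrors:
        arches = info.get("arches", [])
        if arch in arches:
            return info
        if "default" in arches:
            fallback = info
    return fallback

mirrors = [
    {"arches": ["amd64"], "failsafe": {"primary": "http://intel"}},
    {"arches": ["default"], "failsafe": {"primary": "http://fallback"}},
]
assert arch_mirror_info(mirrors, "amd64") is mirrors[0]
assert arch_mirror_info(mirrors, "s390x") is mirrors[1]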
diff --git a/tests/unittests/test_distros/test_gentoo.py b/tests/unittests/distros/test_gentoo.py
index 37a4f51f..dadf5df5 100644
--- a/tests/unittests/test_distros/test_gentoo.py
+++ b/tests/unittests/distros/test_gentoo.py
@@ -1,13 +1,12 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import util
-from cloudinit import atomic_helper
-from cloudinit.tests.helpers import CiTestCase
+from cloudinit import atomic_helper, util
+from tests.unittests.helpers import CiTestCase
+
from . import _get_distro
class TestGentoo(CiTestCase):
-
def test_write_hostname(self):
distro = _get_distro("gentoo")
hostname = "myhostname"
@@ -22,5 +21,7 @@ class TestGentoo(CiTestCase):
hostfile = self.tmp_path("hostfile")
atomic_helper.write_file(hostfile, contents, omode="w")
distro._write_hostname(hostname, hostfile)
- self.assertEqual('#This is the hostname\nhostname="myhostname"\n',
- util.load_file(hostfile))
+ self.assertEqual(
+ '#This is the hostname\nhostname="myhostname"\n',
+ util.load_file(hostfile),
+ )
diff --git a/tests/unittests/test_distros/test_hostname.py b/tests/unittests/distros/test_hostname.py
index f6d4dbe5..2cbbb3e2 100644
--- a/tests/unittests/test_distros/test_hostname.py
+++ b/tests/unittests/distros/test_hostname.py
@@ -4,13 +4,12 @@ import unittest
from cloudinit.distros.parsers import hostname
-
-BASE_HOSTNAME = '''
+BASE_HOSTNAME = """
# My super-duper-hostname
blahblah
-'''
+"""
BASE_HOSTNAME = BASE_HOSTNAME.strip()
@@ -18,7 +17,7 @@ class TestHostnameHelper(unittest.TestCase):
def test_parse_same(self):
hn = hostname.HostnameConf(BASE_HOSTNAME)
self.assertEqual(str(hn).strip(), BASE_HOSTNAME)
- self.assertEqual(hn.hostname, 'blahblah')
+ self.assertEqual(hn.hostname, "blahblah")
def test_no_adjust_hostname(self):
hn = hostname.HostnameConf(BASE_HOSTNAME)
@@ -29,14 +28,15 @@ class TestHostnameHelper(unittest.TestCase):
def test_adjust_hostname(self):
hn = hostname.HostnameConf(BASE_HOSTNAME)
prev_name = hn.hostname
- self.assertEqual(prev_name, 'blahblah')
+ self.assertEqual(prev_name, "blahblah")
hn.set_hostname("bbbbd")
- self.assertEqual(hn.hostname, 'bbbbd')
- expected_out = '''
+ self.assertEqual(hn.hostname, "bbbbd")
+ expected_out = """
# My super-duper-hostname
bbbbd
-'''
+"""
self.assertEqual(str(hn).strip(), expected_out.strip())
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/distros/test_hosts.py b/tests/unittests/distros/test_hosts.py
new file mode 100644
index 00000000..faffd912
--- /dev/null
+++ b/tests/unittests/distros/test_hosts.py
@@ -0,0 +1,47 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import unittest
+
+from cloudinit.distros.parsers import hosts
+
+BASE_ETC = """
+# Example
+127.0.0.1 localhost
+192.168.1.10 foo.mydomain.org foo
+192.168.1.10 bar.mydomain.org bar
+146.82.138.7 master.debian.org master
+209.237.226.90 www.opensource.org
+"""
+BASE_ETC = BASE_ETC.strip()
+
+
+class TestHostsHelper(unittest.TestCase):
+ def test_parse(self):
+ eh = hosts.HostsConf(BASE_ETC)
+ self.assertEqual(eh.get_entry("127.0.0.1"), [["localhost"]])
+ self.assertEqual(
+ eh.get_entry("192.168.1.10"),
+ [["foo.mydomain.org", "foo"], ["bar.mydomain.org", "bar"]],
+ )
+ eh = str(eh)
+ self.assertTrue(eh.startswith("# Example"))
+
+ def test_add(self):
+ eh = hosts.HostsConf(BASE_ETC)
+ eh.add_entry("127.0.0.0", "blah")
+ self.assertEqual(eh.get_entry("127.0.0.0"), [["blah"]])
+ eh.add_entry("127.0.0.3", "blah", "blah2", "blah3")
+ self.assertEqual(
+ eh.get_entry("127.0.0.3"), [["blah", "blah2", "blah3"]]
+ )
+
+ def test_del(self):
+ eh = hosts.HostsConf(BASE_ETC)
+ eh.add_entry("127.0.0.0", "blah")
+ self.assertEqual(eh.get_entry("127.0.0.0"), [["blah"]])
+
+ eh.del_entries("127.0.0.0")
+ self.assertEqual(eh.get_entry("127.0.0.0"), [])
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/distros/test_init.py b/tests/unittests/distros/test_init.py
new file mode 100644
index 00000000..8f3c8978
--- /dev/null
+++ b/tests/unittests/distros/test_init.py
@@ -0,0 +1,248 @@
+# Copyright (C) 2020 Canonical Ltd.
+#
+# Author: Daniel Watkins <oddbloke@ubuntu.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+"""Tests for cloudinit/distros/__init__.py"""
+
+from unittest import mock
+
+import pytest
+
+from cloudinit.distros import LDH_ASCII_CHARS, _get_package_mirror_info
+
+# In newer versions of Python, these characters will be omitted instead
+# of substituted because of security concerns.
+# See https://bugs.python.org/issue43882
+SECURITY_URL_CHARS = "\n\r\t"
+
+# Define a set of characters we would expect to be replaced
+INVALID_URL_CHARS = [
+ chr(x)
+ for x in range(127)
+ if chr(x) not in LDH_ASCII_CHARS + SECURITY_URL_CHARS
+]
+for separator in [":", ".", "/", "#", "?", "@", "[", "]"]:
+ # Remove from the set characters that either separate hostname parts (":",
+ # "."), terminate hostnames ("/", "#", "?", "@"), or cause Python to be
+ # unable to parse URLs ("[", "]").
+ INVALID_URL_CHARS.remove(separator)
+
+
+class TestGetPackageMirrorInfo:
+ """
+ Tests for cloudinit.distros._get_package_mirror_info.
+
+ These supplement the tests in tests/unittests/distros/test_generic.py
+ which are more focused on testing a single production-like configuration.
+ These tests are more focused on specific aspects of the unit under test.
+ """
+
+ @pytest.mark.parametrize(
+ "mirror_info,expected",
+ [
+ # Empty info gives empty return
+ ({}, {}),
+ # failsafe values used if present
+ (
+ {
+ "failsafe": {
+ "primary": "http://value",
+ "security": "http://other",
+ }
+ },
+ {"primary": "http://value", "security": "http://other"},
+ ),
+ # search values used if present
+ (
+ {
+ "search": {
+ "primary": ["http://value"],
+ "security": ["http://other"],
+ }
+ },
+ {"primary": ["http://value"], "security": ["http://other"]},
+ ),
+ # failsafe values used if search value not present
+ (
+ {
+ "search": {"primary": ["http://value"]},
+ "failsafe": {"security": "http://other"},
+ },
+ {"primary": ["http://value"], "security": "http://other"},
+ ),
+ ],
+ )
+ def test_get_package_mirror_info_failsafe(self, mirror_info, expected):
+ """
+ Test the interaction between search and failsafe inputs
+
+ (This doesn't test the case where the mirror_filter removes all search
+ options; test_failsafe_used_if_all_search_results_filtered_out covers
+ that.)
+ """
+ assert expected == _get_package_mirror_info(
+ mirror_info, mirror_filter=lambda x: x
+ )
+
+ def test_failsafe_used_if_all_search_results_filtered_out(self):
+ """Test the failsafe option used if all search options eliminated."""
+ mirror_info = {
+ "search": {"primary": ["http://value"]},
+ "failsafe": {"primary": "http://other"},
+ }
+ assert {"primary": "http://other"} == _get_package_mirror_info(
+ mirror_info, mirror_filter=lambda x: False
+ )
+
+ @pytest.mark.parametrize(
+ "allow_ec2_mirror, platform_type", [(True, "ec2")]
+ )
+ @pytest.mark.parametrize(
+ "availability_zone,region,patterns,expected",
+ (
+ # Test ec2_region alone
+ (
+ "fk-fake-1f",
+ None,
+ ["http://EC2-%(ec2_region)s/ubuntu"],
+ ["http://ec2-fk-fake-1/ubuntu"],
+ ),
+ # Test availability_zone alone
+ (
+ "fk-fake-1f",
+ None,
+ ["http://AZ-%(availability_zone)s/ubuntu"],
+ ["http://az-fk-fake-1f/ubuntu"],
+ ),
+ # Test region alone
+ (
+ None,
+ "fk-fake-1",
+ ["http://RG-%(region)s/ubuntu"],
+ ["http://rg-fk-fake-1/ubuntu"],
+ ),
+ # Test that ec2_region is not available for non-matching AZs
+ (
+ "fake-fake-1f",
+ None,
+ [
+ "http://EC2-%(ec2_region)s/ubuntu",
+ "http://AZ-%(availability_zone)s/ubuntu",
+ ],
+ ["http://az-fake-fake-1f/ubuntu"],
+ ),
+ # Test that template order is maintained
+ (
+ None,
+ "fake-region",
+ [
+ "http://RG-%(region)s-2/ubuntu",
+ "http://RG-%(region)s-1/ubuntu",
+ ],
+ [
+ "http://rg-fake-region-2/ubuntu",
+ "http://rg-fake-region-1/ubuntu",
+ ],
+ ),
+ # Test that non-ASCII hostnames are IDNA encoded;
+ # "IDNA-ТεЅТ̣".encode('idna') == b"xn--idna--4kd53hh6aba3q"
+ (
+ None,
+ "ТεЅТ̣",
+ ["http://www.IDNA-%(region)s.com/ubuntu"],
+ ["http://www.xn--idna--4kd53hh6aba3q.com/ubuntu"],
+ ),
+ # Test that non-ASCII hostnames with a port are IDNA encoded;
+ # "IDNA-ТεЅТ̣".encode('idna') == b"xn--idna--4kd53hh6aba3q"
+ (
+ None,
+ "ТεЅТ̣",
+ ["http://www.IDNA-%(region)s.com:8080/ubuntu"],
+ ["http://www.xn--idna--4kd53hh6aba3q.com:8080/ubuntu"],
+ ),
+ # Test that non-ASCII non-hostname parts of URLs are unchanged
+ (
+ None,
+ "ТεЅТ̣",
+ ["http://www.example.com/%(region)s/ubuntu"],
+ ["http://www.example.com/ТεЅТ̣/ubuntu"],
+ ),
+ # Test that IPv4 addresses are unchanged
+ (
+ None,
+ "fk-fake-1",
+ ["http://192.168.1.1:8080/%(region)s/ubuntu"],
+ ["http://192.168.1.1:8080/fk-fake-1/ubuntu"],
+ ),
+ # Test that IPv6 addresses are unchanged
+ (
+ None,
+ "fk-fake-1",
+ ["http://[2001:67c:1360:8001::23]/%(region)s/ubuntu"],
+ ["http://[2001:67c:1360:8001::23]/fk-fake-1/ubuntu"],
+ ),
+ # Test that unparseable URLs are filtered out of the mirror list
+ (
+ None,
+ "inv[lid",
+ [
+ "http://%(region)s.in.hostname/should/be/filtered",
+ "http://but.not.in.the.path/%(region)s",
+ ],
+ ["http://but.not.in.the.path/inv[lid"],
+ ),
+ (
+ None,
+ "-some-region-",
+ ["http://-lead-ing.%(region)s.trail-ing-.example.com/ubuntu"],
+ ["http://lead-ing.some-region.trail-ing.example.com/ubuntu"],
+ ),
+ )
+ + tuple(
+ # Dynamically generate a test case for each non-LDH
+ # (Letters/Digits/Hyphen) ASCII character, testing that it is
+ # substituted with a hyphen
+ (
+ None,
+ "fk{0}fake{0}1".format(invalid_char),
+ ["http://%(region)s/ubuntu"],
+ ["http://fk-fake-1/ubuntu"],
+ )
+ for invalid_char in INVALID_URL_CHARS
+ ),
+ )
+ def test_valid_substitution(
+ self,
+ allow_ec2_mirror,
+ platform_type,
+ availability_zone,
+ region,
+ patterns,
+ expected,
+ ):
+ """Test substitution works as expected."""
+ flag_path = (
+ "cloudinit.distros.ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES"
+ )
+
+ m_data_source = mock.Mock(
+ availability_zone=availability_zone,
+ region=region,
+ platform_type=platform_type,
+ )
+ mirror_info = {"search": {"primary": patterns}}
+
+ with mock.patch(flag_path, allow_ec2_mirror):
+ ret = _get_package_mirror_info(
+ mirror_info,
+ data_source=m_data_source,
+ mirror_filter=lambda x: x,
+ )
+ print(allow_ec2_mirror)
+ print(platform_type)
+ print(availability_zone)
+ print(region)
+ print(patterns)
+ print(expected)
+ assert {"primary": expected} == ret
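One detail worth restating from the parametrized cases above: the %(region)s-style value is sanitised before it lands in the hostname, with non-LDH (letter/digit/hyphen) characters turned into hyphens and stray leading/trailing hyphens removed. A rough sketch of the character substitution alone; the per-label hyphen trimming, IDNA encoding, and filtering of unparseable URLs visible in the expected outputs are not reproduced here, so treat this purely as an illustration.

import string

LDH = set(string.ascii_letters + string.digits + "-")

def sanitize_mirror_key(value):
    # Replace anything outside letters/digits/hyphen, then trim hyphens.
    cleaned = "".join(c if c in LDH else "-" for c in value)
    return cleaned.strip("-")

assert sanitize_mirror_key("fk_fake_1") == "fk-fake-1"
assert sanitize_mirror_key("-some-region-") == "some-region"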
diff --git a/tests/unittests/distros/test_manage_service.py b/tests/unittests/distros/test_manage_service.py
new file mode 100644
index 00000000..9e64b35c
--- /dev/null
+++ b/tests/unittests/distros/test_manage_service.py
@@ -0,0 +1,41 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from tests.unittests.helpers import CiTestCase, mock
+from tests.unittests.util import MockDistro
+
+
+class TestManageService(CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestManageService, self).setUp()
+ self.dist = MockDistro()
+
+ @mock.patch.object(MockDistro, "uses_systemd", return_value=False)
+ @mock.patch("cloudinit.distros.subp.subp")
+ def test_manage_service_systemctl_initcmd(self, m_subp, m_sysd):
+ self.dist.init_cmd = ["systemctl"]
+ self.dist.manage_service("start", "myssh")
+ m_subp.assert_called_with(
+ ["systemctl", "start", "myssh"], capture=True
+ )
+
+ @mock.patch.object(MockDistro, "uses_systemd", return_value=False)
+ @mock.patch("cloudinit.distros.subp.subp")
+ def test_manage_service_service_initcmd(self, m_subp, m_sysd):
+ self.dist.init_cmd = ["service"]
+ self.dist.manage_service("start", "myssh")
+ m_subp.assert_called_with(["service", "myssh", "start"], capture=True)
+
+ @mock.patch.object(MockDistro, "uses_systemd", return_value=True)
+ @mock.patch("cloudinit.distros.subp.subp")
+ def test_manage_service_systemctl(self, m_subp, m_sysd):
+ self.dist.init_cmd = ["ignore"]
+ self.dist.manage_service("start", "myssh")
+ m_subp.assert_called_with(
+ ["systemctl", "start", "myssh"], capture=True
+ )
+
+
+# vi: ts=4 sw=4 expandtab
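The three cases above fix the argument order for manage_service: systemd-style init takes the action before the unit name, while the classic service wrapper takes the service name first. A toy sketch of that dispatch, purely illustrative and not the cloud-init Distro.manage_service implementation:

def build_service_cmd(init_cmd, action, service, uses_systemd):
    # systemctl is used either when systemd is detected or when it is the
    # configured init command; otherwise fall back to "<init> <name> <action>".
    if uses_systemd or init_cmd == ["systemctl"]:
        return ["systemctl", action, service]
    return init_cmd + [service, action]

assert build_service_cmd(["service"], "start", "myssh", False) == [
    "service", "myssh", "start"]
assert build_service_cmd(["ignore"], "start", "myssh", True) == [
    "systemctl", "start", "myssh"]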
diff --git a/tests/unittests/test_distros/test_netbsd.py b/tests/unittests/distros/test_netbsd.py
index 11a68d2a..0bc6dfbd 100644
--- a/tests/unittests/test_distros/test_netbsd.py
+++ b/tests/unittests/distros/test_netbsd.py
@@ -1,10 +1,11 @@
-import cloudinit.distros.netbsd
+import unittest.mock as mock
import pytest
-import unittest.mock as mock
+
+import cloudinit.distros.netbsd
-@pytest.mark.parametrize('with_pkgin', (True, False))
+@pytest.mark.parametrize("with_pkgin", (True, False))
@mock.patch("cloudinit.distros.netbsd.os")
def test_init(m_os, with_pkgin):
print(with_pkgin)
@@ -12,6 +13,6 @@ def test_init(m_os, with_pkgin):
cfg = {}
distro = cloudinit.distros.netbsd.NetBSD("netbsd", cfg, None)
- expectation = ['pkgin', '-y', 'full-upgrade'] if with_pkgin else None
+ expectation = ["pkgin", "-y", "full-upgrade"] if with_pkgin else None
assert distro.pkg_cmd_upgrade_prefix == expectation
- assert [mock.call('/usr/pkg/bin/pkgin')] == m_os.path.exists.call_args_list
+ assert [mock.call("/usr/pkg/bin/pkgin")] == m_os.path.exists.call_args_list
diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/distros/test_netconfig.py
index a1df066a..a25be481 100644
--- a/tests/unittests/test_distros/test_netconfig.py
+++ b/tests/unittests/distros/test_netconfig.py
@@ -2,21 +2,16 @@
import copy
import os
+import re
from io import StringIO
from textwrap import dedent
from unittest import mock
-from cloudinit import distros
+from cloudinit import distros, helpers, safeyaml, settings, subp, util
from cloudinit.distros.parsers.sys_conf import SysConf
-from cloudinit import helpers
-from cloudinit import settings
-from cloudinit.tests.helpers import (
- FilesystemMockingTestCase, dir2dict)
-from cloudinit import subp
-from cloudinit import util
+from tests.unittests.helpers import FilesystemMockingTestCase, dir2dict
-
-BASE_NET_CFG = '''
+BASE_NET_CFG = """
auto lo
iface lo inet loopback
@@ -30,9 +25,9 @@ iface eth0 inet static
auto eth1
iface eth1 inet dhcp
-'''
+"""
-BASE_NET_CFG_FROM_V2 = '''
+BASE_NET_CFG_FROM_V2 = """
auto lo
iface lo inet loopback
@@ -43,9 +38,9 @@ iface eth0 inet static
auto eth1
iface eth1 inet dhcp
-'''
+"""
-BASE_NET_CFG_IPV6 = '''
+BASE_NET_CFG_IPV6 = """
auto lo
iface lo inet loopback
@@ -73,20 +68,49 @@ iface eth1 inet6 static
address 2607:f0d0:1002:0011::3
netmask 64
gateway 2607:f0d0:1002:0011::1
-'''
+"""
-V1_NET_CFG = {'config': [{'name': 'eth0',
+V1_NET_CFG = {
+ "config": [
+ {
+ "name": "eth0",
+ "subnets": [
+ {
+ "address": "192.168.1.5",
+ "broadcast": "192.168.1.0",
+ "gateway": "192.168.1.254",
+ "netmask": "255.255.255.0",
+ "type": "static",
+ }
+ ],
+ "type": "physical",
+ },
+ {
+ "name": "eth1",
+ "subnets": [{"control": "auto", "type": "dhcp4"}],
+ "type": "physical",
+ },
+ ],
+ "version": 1,
+}
- 'subnets': [{'address': '192.168.1.5',
- 'broadcast': '192.168.1.0',
- 'gateway': '192.168.1.254',
- 'netmask': '255.255.255.0',
- 'type': 'static'}],
- 'type': 'physical'},
- {'name': 'eth1',
- 'subnets': [{'control': 'auto', 'type': 'dhcp4'}],
- 'type': 'physical'}],
- 'version': 1}
+V1_NET_CFG_WITH_DUPS = """\
+# same value in interface specific dns and global dns
+# should produce single entry in network file
+version: 1
+config:
+ - type: physical
+ name: eth0
+ subnets:
+ - type: static
+ address: 192.168.0.102/24
+ dns_nameservers: [1.2.3.4]
+ dns_search: [test.com]
+ interface: eth0
+ - type: nameserver
+ address: [1.2.3.4]
+ search: [test.com]
+"""
V1_NET_CFG_OUTPUT = """\
# This file is generated from information provided by the datasource. Changes
@@ -125,19 +149,28 @@ auto eth1
iface eth1 inet dhcp
"""
-V1_NET_CFG_IPV6 = {'config': [{'name': 'eth0',
- 'subnets': [{'address':
- '2607:f0d0:1002:0011::2',
- 'gateway':
- '2607:f0d0:1002:0011::1',
- 'netmask': '64',
- 'type': 'static6'}],
- 'type': 'physical'},
- {'name': 'eth1',
- 'subnets': [{'control': 'auto',
- 'type': 'dhcp4'}],
- 'type': 'physical'}],
- 'version': 1}
+V1_NET_CFG_IPV6 = {
+ "config": [
+ {
+ "name": "eth0",
+ "subnets": [
+ {
+ "address": "2607:f0d0:1002:0011::2",
+ "gateway": "2607:f0d0:1002:0011::1",
+ "netmask": "64",
+ "type": "static6",
+ }
+ ],
+ "type": "physical",
+ },
+ {
+ "name": "eth1",
+ "subnets": [{"control": "auto", "type": "dhcp4"}],
+ "type": "physical",
+ },
+ ],
+ "version": 1,
+}
V1_TO_V2_NET_CFG_OUTPUT = """\
@@ -175,14 +208,11 @@ network:
"""
V2_NET_CFG = {
- 'ethernets': {
- 'eth7': {
- 'addresses': ['192.168.1.5/24'],
- 'gateway4': '192.168.1.254'},
- 'eth9': {
- 'dhcp4': True}
+ "ethernets": {
+ "eth7": {"addresses": ["192.168.1.5/24"], "gateway4": "192.168.1.254"},
+ "eth9": {"dhcp4": True},
},
- 'version': 2
+ "version": 2,
}
@@ -218,21 +248,18 @@ class WriteBuffer(object):
class TestNetCfgDistroBase(FilesystemMockingTestCase):
-
def setUp(self):
super(TestNetCfgDistroBase, self).setUp()
- self.add_patch('cloudinit.util.system_is_snappy', 'm_snappy')
- self.add_patch('cloudinit.util.system_info', 'm_sysinfo')
- self.m_sysinfo.return_value = {'dist': ('Distro', '99.1', 'Codename')}
+ self.add_patch("cloudinit.util.system_is_snappy", "m_snappy")
def _get_distro(self, dname, renderers=None):
cls = distros.fetch(dname)
cfg = settings.CFG_BUILTIN
- cfg['system_info']['distro'] = dname
+ cfg["system_info"]["distro"] = dname
if renderers:
- cfg['system_info']['network'] = {'renderers': renderers}
+ cfg["system_info"]["network"] = {"renderers": renderers}
paths = helpers.Paths({})
- return cls(dname, cfg.get('system_info'), paths)
+ return cls(dname, cfg.get("system_info"), paths)
def assertCfgEquals(self, blob1, blob2):
b1 = dict(SysConf(blob1.strip().splitlines()))
@@ -247,23 +274,23 @@ class TestNetCfgDistroBase(FilesystemMockingTestCase):
class TestNetCfgDistroFreeBSD(TestNetCfgDistroBase):
-
def setUp(self):
super(TestNetCfgDistroFreeBSD, self).setUp()
- self.distro = self._get_distro('freebsd', renderers=['freebsd'])
+ self.distro = self._get_distro("freebsd", renderers=["freebsd"])
- def _apply_and_verify_freebsd(self, apply_fn, config, expected_cfgs=None,
- bringup=False):
+ def _apply_and_verify_freebsd(
+ self, apply_fn, config, expected_cfgs=None, bringup=False
+ ):
if not expected_cfgs:
- raise ValueError('expected_cfg must not be None')
+ raise ValueError("expected_cfg must not be None")
tmpd = None
- with mock.patch('cloudinit.net.freebsd.available') as m_avail:
+ with mock.patch("cloudinit.net.freebsd.available") as m_avail:
m_avail.return_value = True
with self.reRooted(tmpd) as tmpd:
- util.ensure_dir('/etc')
- util.ensure_file('/etc/rc.conf')
- util.ensure_file('/etc/resolv.conf')
+ util.ensure_dir("/etc")
+ util.ensure_file("/etc/rc.conf")
+ util.ensure_file("/etc/resolv.conf")
apply_fn(config, bringup)
results = dir2dict(tmpd)
@@ -274,14 +301,14 @@ class TestNetCfgDistroFreeBSD(TestNetCfgDistroBase):
print(results[cfgpath])
print("----------")
self.assertEqual(
- set(expected.split('\n')),
- set(results[cfgpath].split('\n')))
+ set(expected.split("\n")), set(results[cfgpath].split("\n"))
+ )
self.assertEqual(0o644, get_mode(cfgpath, tmpd))
- @mock.patch('cloudinit.net.get_interfaces_by_mac')
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
def test_apply_network_config_freebsd_standard(self, ifaces_mac):
ifaces_mac.return_value = {
- '00:15:5d:4c:73:00': 'eth0',
+ "00:15:5d:4c:73:00": "eth0",
}
rc_conf_expected = """\
defaultrouter=192.168.1.254
@@ -290,17 +317,19 @@ ifconfig_eth1=DHCP
"""
expected_cfgs = {
- '/etc/rc.conf': rc_conf_expected,
- '/etc/resolv.conf': ''
+ "/etc/rc.conf": rc_conf_expected,
+ "/etc/resolv.conf": "",
}
- self._apply_and_verify_freebsd(self.distro.apply_network_config,
- V1_NET_CFG,
- expected_cfgs=expected_cfgs.copy())
+ self._apply_and_verify_freebsd(
+ self.distro.apply_network_config,
+ V1_NET_CFG,
+ expected_cfgs=expected_cfgs.copy(),
+ )
- @mock.patch('cloudinit.net.get_interfaces_by_mac')
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
def test_apply_network_config_freebsd_ifrename(self, ifaces_mac):
ifaces_mac.return_value = {
- '00:15:5d:4c:73:00': 'vtnet0',
+ "00:15:5d:4c:73:00": "vtnet0",
}
rc_conf_expected = """\
ifconfig_vtnet0_name=eth0
@@ -310,49 +339,51 @@ ifconfig_eth1=DHCP
"""
V1_NET_CFG_RENAME = copy.deepcopy(V1_NET_CFG)
- V1_NET_CFG_RENAME['config'][0]['mac_address'] = '00:15:5d:4c:73:00'
+ V1_NET_CFG_RENAME["config"][0]["mac_address"] = "00:15:5d:4c:73:00"
expected_cfgs = {
- '/etc/rc.conf': rc_conf_expected,
- '/etc/resolv.conf': ''
+ "/etc/rc.conf": rc_conf_expected,
+ "/etc/resolv.conf": "",
}
- self._apply_and_verify_freebsd(self.distro.apply_network_config,
- V1_NET_CFG_RENAME,
- expected_cfgs=expected_cfgs.copy())
+ self._apply_and_verify_freebsd(
+ self.distro.apply_network_config,
+ V1_NET_CFG_RENAME,
+ expected_cfgs=expected_cfgs.copy(),
+ )
- @mock.patch('cloudinit.net.get_interfaces_by_mac')
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
def test_apply_network_config_freebsd_nameserver(self, ifaces_mac):
ifaces_mac.return_value = {
- '00:15:5d:4c:73:00': 'eth0',
+ "00:15:5d:4c:73:00": "eth0",
}
V1_NET_CFG_DNS = copy.deepcopy(V1_NET_CFG)
- ns = ['1.2.3.4']
- V1_NET_CFG_DNS['config'][0]['subnets'][0]['dns_nameservers'] = ns
- expected_cfgs = {
- '/etc/resolv.conf': 'nameserver 1.2.3.4\n'
- }
- self._apply_and_verify_freebsd(self.distro.apply_network_config,
- V1_NET_CFG_DNS,
- expected_cfgs=expected_cfgs.copy())
+ ns = ["1.2.3.4"]
+ V1_NET_CFG_DNS["config"][0]["subnets"][0]["dns_nameservers"] = ns
+ expected_cfgs = {"/etc/resolv.conf": "nameserver 1.2.3.4\n"}
+ self._apply_and_verify_freebsd(
+ self.distro.apply_network_config,
+ V1_NET_CFG_DNS,
+ expected_cfgs=expected_cfgs.copy(),
+ )
class TestNetCfgDistroUbuntuEni(TestNetCfgDistroBase):
-
def setUp(self):
super(TestNetCfgDistroUbuntuEni, self).setUp()
- self.distro = self._get_distro('ubuntu', renderers=['eni'])
+ self.distro = self._get_distro("ubuntu", renderers=["eni"])
def eni_path(self):
- return '/etc/network/interfaces.d/50-cloud-init.cfg'
+ return "/etc/network/interfaces.d/50-cloud-init.cfg"
- def _apply_and_verify_eni(self, apply_fn, config, expected_cfgs=None,
- bringup=False):
+ def _apply_and_verify_eni(
+ self, apply_fn, config, expected_cfgs=None, bringup=False
+ ):
if not expected_cfgs:
- raise ValueError('expected_cfg must not be None')
+ raise ValueError("expected_cfg must not be None")
tmpd = None
- with mock.patch('cloudinit.net.eni.available') as m_avail:
+ with mock.patch("cloudinit.net.eni.available") as m_avail:
m_avail.return_value = True
with self.reRooted(tmpd) as tmpd:
apply_fn(config, bringup)
@@ -372,35 +403,39 @@ class TestNetCfgDistroUbuntuEni(TestNetCfgDistroBase):
self.eni_path(): V1_NET_CFG_OUTPUT,
}
# ub_distro.apply_network_config(V1_NET_CFG, False)
- self._apply_and_verify_eni(self.distro.apply_network_config,
- V1_NET_CFG,
- expected_cfgs=expected_cfgs.copy())
+ self._apply_and_verify_eni(
+ self.distro.apply_network_config,
+ V1_NET_CFG,
+ expected_cfgs=expected_cfgs.copy(),
+ )
def test_apply_network_config_ipv6_ub(self):
- expected_cfgs = {
- self.eni_path(): V1_NET_CFG_IPV6_OUTPUT
- }
- self._apply_and_verify_eni(self.distro.apply_network_config,
- V1_NET_CFG_IPV6,
- expected_cfgs=expected_cfgs.copy())
+ expected_cfgs = {self.eni_path(): V1_NET_CFG_IPV6_OUTPUT}
+ self._apply_and_verify_eni(
+ self.distro.apply_network_config,
+ V1_NET_CFG_IPV6,
+ expected_cfgs=expected_cfgs.copy(),
+ )
class TestNetCfgDistroUbuntuNetplan(TestNetCfgDistroBase):
def setUp(self):
super(TestNetCfgDistroUbuntuNetplan, self).setUp()
- self.distro = self._get_distro('ubuntu', renderers=['netplan'])
- self.devlist = ['eth0', 'lo']
+ self.distro = self._get_distro("ubuntu", renderers=["netplan"])
+ self.devlist = ["eth0", "lo"]
- def _apply_and_verify_netplan(self, apply_fn, config, expected_cfgs=None,
- bringup=False):
+ def _apply_and_verify_netplan(
+ self, apply_fn, config, expected_cfgs=None, bringup=False
+ ):
if not expected_cfgs:
- raise ValueError('expected_cfg must not be None')
+ raise ValueError("expected_cfg must not be None")
tmpd = None
- with mock.patch('cloudinit.net.netplan.available',
- return_value=True):
- with mock.patch("cloudinit.net.netplan.get_devicelist",
- return_value=self.devlist):
+ with mock.patch("cloudinit.net.netplan.available", return_value=True):
+ with mock.patch(
+ "cloudinit.net.netplan.get_devicelist",
+ return_value=self.devlist,
+ ):
with self.reRooted(tmpd) as tmpd:
apply_fn(config, bringup)
@@ -415,7 +450,7 @@ class TestNetCfgDistroUbuntuNetplan(TestNetCfgDistroBase):
self.assertEqual(0o644, get_mode(cfgpath, tmpd))
def netplan_path(self):
- return '/etc/netplan/50-cloud-init.yaml'
+ return "/etc/netplan/50-cloud-init.yaml"
def test_apply_network_config_v1_to_netplan_ub(self):
expected_cfgs = {
@@ -423,9 +458,11 @@ class TestNetCfgDistroUbuntuNetplan(TestNetCfgDistroBase):
}
# ub_distro.apply_network_config(V1_NET_CFG, False)
- self._apply_and_verify_netplan(self.distro.apply_network_config,
- V1_NET_CFG,
- expected_cfgs=expected_cfgs.copy())
+ self._apply_and_verify_netplan(
+ self.distro.apply_network_config,
+ V1_NET_CFG,
+ expected_cfgs=expected_cfgs.copy(),
+ )
def test_apply_network_config_v1_ipv6_to_netplan_ub(self):
expected_cfgs = {
@@ -433,39 +470,43 @@ class TestNetCfgDistroUbuntuNetplan(TestNetCfgDistroBase):
}
# ub_distro.apply_network_config(V1_NET_CFG_IPV6, False)
- self._apply_and_verify_netplan(self.distro.apply_network_config,
- V1_NET_CFG_IPV6,
- expected_cfgs=expected_cfgs.copy())
+ self._apply_and_verify_netplan(
+ self.distro.apply_network_config,
+ V1_NET_CFG_IPV6,
+ expected_cfgs=expected_cfgs.copy(),
+ )
def test_apply_network_config_v2_passthrough_ub(self):
expected_cfgs = {
self.netplan_path(): V2_TO_V2_NET_CFG_OUTPUT,
}
# ub_distro.apply_network_config(V2_NET_CFG, False)
- self._apply_and_verify_netplan(self.distro.apply_network_config,
- V2_NET_CFG,
- expected_cfgs=expected_cfgs.copy())
+ self._apply_and_verify_netplan(
+ self.distro.apply_network_config,
+ V2_NET_CFG,
+ expected_cfgs=expected_cfgs.copy(),
+ )
class TestNetCfgDistroRedhat(TestNetCfgDistroBase):
-
def setUp(self):
super(TestNetCfgDistroRedhat, self).setUp()
- self.distro = self._get_distro('rhel', renderers=['sysconfig'])
+ self.distro = self._get_distro("rhel", renderers=["sysconfig"])
def ifcfg_path(self, ifname):
- return '/etc/sysconfig/network-scripts/ifcfg-%s' % ifname
+ return "/etc/sysconfig/network-scripts/ifcfg-%s" % ifname
def control_path(self):
- return '/etc/sysconfig/network'
+ return "/etc/sysconfig/network"
- def _apply_and_verify(self, apply_fn, config, expected_cfgs=None,
- bringup=False):
+ def _apply_and_verify(
+ self, apply_fn, config, expected_cfgs=None, bringup=False
+ ):
if not expected_cfgs:
- raise ValueError('expected_cfg must not be None')
+ raise ValueError("expected_cfg must not be None")
tmpd = None
- with mock.patch('cloudinit.net.sysconfig.available') as m_avail:
+ with mock.patch("cloudinit.net.sysconfig.available") as m_avail:
m_avail.return_value = True
with self.reRooted(tmpd) as tmpd:
apply_fn(config, bringup)
@@ -477,7 +518,8 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase):
def test_apply_network_config_rh(self):
expected_cfgs = {
- self.ifcfg_path('eth0'): dedent("""\
+ self.ifcfg_path("eth0"): dedent(
+ """\
BOOTPROTO=none
DEFROUTE=yes
DEVICE=eth0
@@ -488,27 +530,35 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase):
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
- self.ifcfg_path('eth1'): dedent("""\
+ """
+ ),
+ self.ifcfg_path("eth1"): dedent(
+ """\
BOOTPROTO=dhcp
DEVICE=eth1
NM_CONTROLLED=no
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
- self.control_path(): dedent("""\
+ """
+ ),
+ self.control_path(): dedent(
+ """\
NETWORKING=yes
- """),
+ """
+ ),
}
# rh_distro.apply_network_config(V1_NET_CFG, False)
- self._apply_and_verify(self.distro.apply_network_config,
- V1_NET_CFG,
- expected_cfgs=expected_cfgs.copy())
+ self._apply_and_verify(
+ self.distro.apply_network_config,
+ V1_NET_CFG,
+ expected_cfgs=expected_cfgs.copy(),
+ )
def test_apply_network_config_ipv6_rh(self):
expected_cfgs = {
- self.ifcfg_path('eth0'): dedent("""\
+ self.ifcfg_path("eth0"): dedent(
+ """\
BOOTPROTO=none
DEFROUTE=yes
DEVICE=eth0
@@ -521,39 +571,54 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase):
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
- self.ifcfg_path('eth1'): dedent("""\
+ """
+ ),
+ self.ifcfg_path("eth1"): dedent(
+ """\
BOOTPROTO=dhcp
DEVICE=eth1
NM_CONTROLLED=no
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
- self.control_path(): dedent("""\
+ """
+ ),
+ self.control_path(): dedent(
+ """\
NETWORKING=yes
NETWORKING_IPV6=yes
IPV6_AUTOCONF=no
- """),
+ """
+ ),
}
# rh_distro.apply_network_config(V1_NET_CFG_IPV6, False)
- self._apply_and_verify(self.distro.apply_network_config,
- V1_NET_CFG_IPV6,
- expected_cfgs=expected_cfgs.copy())
+ self._apply_and_verify(
+ self.distro.apply_network_config,
+ V1_NET_CFG_IPV6,
+ expected_cfgs=expected_cfgs.copy(),
+ )
def test_vlan_render_unsupported(self):
"""Render officially unsupported vlan names."""
cfg = {
- 'version': 2,
- 'ethernets': {
- 'eth0': {'addresses': ["192.10.1.2/24"],
- 'match': {'macaddress': "00:16:3e:60:7c:df"}}},
- 'vlans': {
- 'infra0': {'addresses': ["10.0.1.2/16"],
- 'id': 1001, 'link': 'eth0'}},
+ "version": 2,
+ "ethernets": {
+ "eth0": {
+ "addresses": ["192.10.1.2/24"],
+ "match": {"macaddress": "00:16:3e:60:7c:df"},
+ }
+ },
+ "vlans": {
+ "infra0": {
+ "addresses": ["10.0.1.2/16"],
+ "id": 1001,
+ "link": "eth0",
+ }
+ },
}
expected_cfgs = {
- self.ifcfg_path('eth0'): dedent("""\
+ self.ifcfg_path("eth0"): dedent(
+ """\
BOOTPROTO=none
DEVICE=eth0
HWADDR=00:16:3e:60:7c:df
@@ -563,8 +628,10 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase):
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
- self.ifcfg_path('infra0'): dedent("""\
+ """
+ ),
+ self.ifcfg_path("infra0"): dedent(
+ """\
BOOTPROTO=none
DEVICE=infra0
IPADDR=10.0.1.2
@@ -574,26 +641,33 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase):
PHYSDEV=eth0
USERCTL=no
VLAN=yes
- """),
- self.control_path(): dedent("""\
+ """
+ ),
+ self.control_path(): dedent(
+ """\
NETWORKING=yes
- """),
+ """
+ ),
}
self._apply_and_verify(
- self.distro.apply_network_config, cfg,
- expected_cfgs=expected_cfgs)
+ self.distro.apply_network_config, cfg, expected_cfgs=expected_cfgs
+ )
def test_vlan_render(self):
cfg = {
- 'version': 2,
- 'ethernets': {
- 'eth0': {'addresses': ["192.10.1.2/24"]}},
- 'vlans': {
- 'eth0.1001': {'addresses': ["10.0.1.2/16"],
- 'id': 1001, 'link': 'eth0'}},
+ "version": 2,
+ "ethernets": {"eth0": {"addresses": ["192.10.1.2/24"]}},
+ "vlans": {
+ "eth0.1001": {
+ "addresses": ["10.0.1.2/16"],
+ "id": 1001,
+ "link": "eth0",
+ }
+ },
}
expected_cfgs = {
- self.ifcfg_path('eth0'): dedent("""\
+ self.ifcfg_path("eth0"): dedent(
+ """\
BOOTPROTO=none
DEVICE=eth0
IPADDR=192.10.1.2
@@ -602,8 +676,10 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase):
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
- self.ifcfg_path('eth0.1001'): dedent("""\
+ """
+ ),
+ self.ifcfg_path("eth0.1001"): dedent(
+ """\
BOOTPROTO=none
DEVICE=eth0.1001
IPADDR=10.0.1.2
@@ -613,32 +689,35 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase):
PHYSDEV=eth0
USERCTL=no
VLAN=yes
- """),
- self.control_path(): dedent("""\
+ """
+ ),
+ self.control_path(): dedent(
+ """\
NETWORKING=yes
- """),
+ """
+ ),
}
self._apply_and_verify(
- self.distro.apply_network_config, cfg,
- expected_cfgs=expected_cfgs)
+ self.distro.apply_network_config, cfg, expected_cfgs=expected_cfgs
+ )
class TestNetCfgDistroOpensuse(TestNetCfgDistroBase):
-
def setUp(self):
super(TestNetCfgDistroOpensuse, self).setUp()
- self.distro = self._get_distro('opensuse', renderers=['sysconfig'])
+ self.distro = self._get_distro("opensuse", renderers=["sysconfig"])
def ifcfg_path(self, ifname):
- return '/etc/sysconfig/network/ifcfg-%s' % ifname
+ return "/etc/sysconfig/network/ifcfg-%s" % ifname
- def _apply_and_verify(self, apply_fn, config, expected_cfgs=None,
- bringup=False):
+ def _apply_and_verify(
+ self, apply_fn, config, expected_cfgs=None, bringup=False
+ ):
if not expected_cfgs:
- raise ValueError('expected_cfg must not be None')
+ raise ValueError("expected_cfg must not be None")
tmpd = None
- with mock.patch('cloudinit.net.sysconfig.available') as m_avail:
+ with mock.patch("cloudinit.net.sysconfig.available") as m_avail:
m_avail.return_value = True
with self.reRooted(tmpd) as tmpd:
apply_fn(config, bringup)
@@ -651,52 +730,71 @@ class TestNetCfgDistroOpensuse(TestNetCfgDistroBase):
def test_apply_network_config_opensuse(self):
"""Opensuse uses apply_network_config and renders sysconfig"""
expected_cfgs = {
- self.ifcfg_path('eth0'): dedent("""\
+ self.ifcfg_path("eth0"): dedent(
+ """\
BOOTPROTO=static
IPADDR=192.168.1.5
NETMASK=255.255.255.0
STARTMODE=auto
- """),
- self.ifcfg_path('eth1'): dedent("""\
+ """
+ ),
+ self.ifcfg_path("eth1"): dedent(
+ """\
BOOTPROTO=dhcp4
STARTMODE=auto
- """),
+ """
+ ),
}
- self._apply_and_verify(self.distro.apply_network_config,
- V1_NET_CFG,
- expected_cfgs=expected_cfgs.copy())
+ self._apply_and_verify(
+ self.distro.apply_network_config,
+ V1_NET_CFG,
+ expected_cfgs=expected_cfgs.copy(),
+ )
def test_apply_network_config_ipv6_opensuse(self):
"""Opensuse uses apply_network_config and renders sysconfig w/ipv6"""
expected_cfgs = {
- self.ifcfg_path('eth0'): dedent("""\
+ self.ifcfg_path("eth0"): dedent(
+ """\
BOOTPROTO=static
IPADDR6=2607:f0d0:1002:0011::2/64
STARTMODE=auto
- """),
- self.ifcfg_path('eth1'): dedent("""\
+ """
+ ),
+ self.ifcfg_path("eth1"): dedent(
+ """\
BOOTPROTO=dhcp4
STARTMODE=auto
- """),
+ """
+ ),
}
- self._apply_and_verify(self.distro.apply_network_config,
- V1_NET_CFG_IPV6,
- expected_cfgs=expected_cfgs.copy())
+ self._apply_and_verify(
+ self.distro.apply_network_config,
+ V1_NET_CFG_IPV6,
+ expected_cfgs=expected_cfgs.copy(),
+ )
class TestNetCfgDistroArch(TestNetCfgDistroBase):
def setUp(self):
super(TestNetCfgDistroArch, self).setUp()
- self.distro = self._get_distro('arch', renderers=['netplan'])
-
- def _apply_and_verify(self, apply_fn, config, expected_cfgs=None,
- bringup=False, with_netplan=False):
+ self.distro = self._get_distro("arch", renderers=["netplan"])
+
+ def _apply_and_verify(
+ self,
+ apply_fn,
+ config,
+ expected_cfgs=None,
+ bringup=False,
+ with_netplan=False,
+ ):
if not expected_cfgs:
- raise ValueError('expected_cfg must not be None')
+ raise ValueError("expected_cfg must not be None")
tmpd = None
- with mock.patch('cloudinit.net.netplan.available',
- return_value=with_netplan):
+ with mock.patch(
+ "cloudinit.net.netplan.available", return_value=with_netplan
+ ):
with self.reRooted(tmpd) as tmpd:
apply_fn(config, bringup)
@@ -711,10 +809,10 @@ class TestNetCfgDistroArch(TestNetCfgDistroBase):
self.assertEqual(0o644, get_mode(cfgpath, tmpd))
def netctl_path(self, iface):
- return '/etc/netctl/%s' % iface
+ return "/etc/netctl/%s" % iface
def netplan_path(self):
- return '/etc/netplan/50-cloud-init.yaml'
+ return "/etc/netplan/50-cloud-init.yaml"
def test_apply_network_config_v1_without_netplan(self):
# Note that this is in fact an invalid netctl config:
@@ -724,33 +822,40 @@ class TestNetCfgDistroArch(TestNetCfgDistroBase):
# still being used in absence of netplan, not the correctness of the
# rendered netctl config.
expected_cfgs = {
- self.netctl_path('eth0'): dedent("""\
+ self.netctl_path("eth0"): dedent(
+ """\
Address=192.168.1.5/255.255.255.0
Connection=ethernet
DNS=()
Gateway=192.168.1.254
IP=static
Interface=eth0
- """),
- self.netctl_path('eth1'): dedent("""\
+ """
+ ),
+ self.netctl_path("eth1"): dedent(
+ """\
Address=None/None
Connection=ethernet
DNS=()
Gateway=
IP=dhcp
Interface=eth1
- """),
+ """
+ ),
}
# ub_distro.apply_network_config(V1_NET_CFG, False)
- self._apply_and_verify(self.distro.apply_network_config,
- V1_NET_CFG,
- expected_cfgs=expected_cfgs.copy(),
- with_netplan=False)
+ self._apply_and_verify(
+ self.distro.apply_network_config,
+ V1_NET_CFG,
+ expected_cfgs=expected_cfgs.copy(),
+ with_netplan=False,
+ )
def test_apply_network_config_v1_with_netplan(self):
expected_cfgs = {
- self.netplan_path(): dedent("""\
+ self.netplan_path(): dedent(
+ """\
# generated by cloud-init
network:
version: 2
@@ -761,17 +866,148 @@ class TestNetCfgDistroArch(TestNetCfgDistroBase):
gateway4: 192.168.1.254
eth1:
dhcp4: true
- """),
+ """
+ ),
}
- with mock.patch('cloudinit.util.is_FreeBSD', return_value=False):
- self._apply_and_verify(self.distro.apply_network_config,
- V1_NET_CFG,
- expected_cfgs=expected_cfgs.copy(),
- with_netplan=True)
+ with mock.patch(
+ "cloudinit.net.netplan.get_devicelist", return_value=[]
+ ):
+ self._apply_and_verify(
+ self.distro.apply_network_config,
+ V1_NET_CFG,
+ expected_cfgs=expected_cfgs.copy(),
+ with_netplan=True,
+ )
+
+
+class TestNetCfgDistroPhoton(TestNetCfgDistroBase):
+ def setUp(self):
+ super(TestNetCfgDistroPhoton, self).setUp()
+ self.distro = self._get_distro("photon", renderers=["networkd"])
+
+ def create_conf_dict(self, contents):
+ content_dict = {}
+ for line in contents:
+ if line:
+ line = line.strip()
+ if line and re.search(r"^\[(.+)\]$", line):
+ content_dict[line] = []
+ key = line
+ elif line:
+ assert key
+ content_dict[key].append(line)
+
+ return content_dict
+
+ def compare_dicts(self, actual, expected):
+ for k, v in actual.items():
+ self.assertEqual(sorted(expected[k]), sorted(v))
+
+ def _apply_and_verify(
+ self, apply_fn, config, expected_cfgs=None, bringup=False
+ ):
+ if not expected_cfgs:
+ raise ValueError("expected_cfg must not be None")
+
+ tmpd = None
+ with mock.patch("cloudinit.net.networkd.available") as m_avail:
+ m_avail.return_value = True
+ with self.reRooted(tmpd) as tmpd:
+ apply_fn(config, bringup)
+
+ results = dir2dict(tmpd)
+ for cfgpath, expected in expected_cfgs.items():
+ actual = self.create_conf_dict(results[cfgpath].splitlines())
+ self.compare_dicts(actual, expected)
+ self.assertEqual(0o644, get_mode(cfgpath, tmpd))
+
+ def nwk_file_path(self, ifname):
+ return "/etc/systemd/network/10-cloud-init-%s.network" % ifname
+
+ def net_cfg_1(self, ifname):
+ ret = (
+ """\
+ [Match]
+ Name=%s
+ [Network]
+ DHCP=no
+ [Address]
+ Address=192.168.1.5/24
+ [Route]
+ Gateway=192.168.1.254"""
+ % ifname
+ )
+ return ret
+
+ def net_cfg_2(self, ifname):
+ ret = (
+ """\
+ [Match]
+ Name=%s
+ [Network]
+ DHCP=ipv4"""
+ % ifname
+ )
+ return ret
+
+ def test_photon_network_config_v1(self):
+ tmp = self.net_cfg_1("eth0").splitlines()
+ expected_eth0 = self.create_conf_dict(tmp)
+
+ tmp = self.net_cfg_2("eth1").splitlines()
+ expected_eth1 = self.create_conf_dict(tmp)
+
+ expected_cfgs = {
+ self.nwk_file_path("eth0"): expected_eth0,
+ self.nwk_file_path("eth1"): expected_eth1,
+ }
+
+ self._apply_and_verify(
+ self.distro.apply_network_config, V1_NET_CFG, expected_cfgs.copy()
+ )
+
+ def test_photon_network_config_v2(self):
+ tmp = self.net_cfg_1("eth7").splitlines()
+ expected_eth7 = self.create_conf_dict(tmp)
+
+ tmp = self.net_cfg_2("eth9").splitlines()
+ expected_eth9 = self.create_conf_dict(tmp)
+
+ expected_cfgs = {
+ self.nwk_file_path("eth7"): expected_eth7,
+ self.nwk_file_path("eth9"): expected_eth9,
+ }
+
+ self._apply_and_verify(
+ self.distro.apply_network_config, V2_NET_CFG, expected_cfgs.copy()
+ )
+
+ def test_photon_network_config_v1_with_duplicates(self):
+ expected = """\
+ [Match]
+ Name=eth0
+ [Network]
+ DHCP=no
+ DNS=1.2.3.4
+ Domains=test.com
+ [Address]
+ Address=192.168.0.102/24"""
+
+ net_cfg = safeyaml.load(V1_NET_CFG_WITH_DUPS)
+
+ expected = self.create_conf_dict(expected.splitlines())
+ expected_cfgs = {
+ self.nwk_file_path("eth0"): expected,
+ }
+
+ self._apply_and_verify(
+ self.distro.apply_network_config, net_cfg, expected_cfgs.copy()
+ )
def get_mode(path, target=None):
return os.stat(subp.target_path(target, path)).st_mode & 0o777
+
# vi: ts=4 expandtab
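
The Photon assertions above avoid brittle byte-for-byte comparison of the rendered systemd-networkd units: each file is reduced to a dict mapping a `[Section]` header to its key=value lines, so ordering within a section does not matter. A minimal, standalone sketch of that parsing idea (the helper name is illustrative, not part of the change):

    import re


    def sections_to_dict(text):
        """Group lines of a networkd-style unit under their [Section] headers."""
        result, key = {}, None
        for line in text.splitlines():
            line = line.strip()
            if not line:
                continue
            if re.search(r"^\[(.+)\]$", line):
                key = line
                result[key] = []
            else:
                assert key, "key=value line appeared before any [Section] header"
                result[key].append(line)
        return result


    assert sections_to_dict("[Match]\nName=eth0\n[Network]\nDHCP=ipv4") == {
        "[Match]": ["Name=eth0"],
        "[Network]": ["DHCP=ipv4"],
    }
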
diff --git a/tests/unittests/distros/test_networking.py b/tests/unittests/distros/test_networking.py
new file mode 100644
index 00000000..274647cb
--- /dev/null
+++ b/tests/unittests/distros/test_networking.py
@@ -0,0 +1,231 @@
+# See https://docs.pytest.org/en/stable/example
+# /parametrize.html#parametrizing-conditional-raising
+from contextlib import ExitStack as does_not_raise
+from unittest import mock
+
+import pytest
+
+from cloudinit import net
+from cloudinit.distros.networking import (
+ BSDNetworking,
+ LinuxNetworking,
+ Networking,
+)
+
+
+@pytest.fixture
+def generic_networking_cls():
+ """Returns a direct Networking subclass which errors on /sys usage.
+
+ This enables the direct testing of functionality only present on the
+ ``Networking`` super-class, and provides a check on accidentally using /sys
+ in that context.
+ """
+
+ class TestNetworking(Networking):
+ def is_physical(self, *args, **kwargs):
+ raise NotImplementedError
+
+ def settle(self, *args, **kwargs):
+ raise NotImplementedError
+
+ def try_set_link_up(self, *args, **kwargs):
+ raise NotImplementedError
+
+ error = AssertionError("Unexpectedly used /sys in generic networking code")
+ with mock.patch(
+ "cloudinit.net.get_sys_class_path",
+ side_effect=error,
+ ):
+ yield TestNetworking
+
+
+@pytest.fixture
+def sys_class_net(tmpdir):
+ sys_class_net_path = tmpdir.join("sys/class/net")
+ sys_class_net_path.ensure_dir()
+ with mock.patch(
+ "cloudinit.net.get_sys_class_path",
+ return_value=sys_class_net_path.strpath + "/",
+ ):
+ yield sys_class_net_path
+
+
+class TestBSDNetworkingIsPhysical:
+ def test_raises_notimplementederror(self):
+ with pytest.raises(NotImplementedError):
+ BSDNetworking().is_physical("eth0")
+
+
+class TestLinuxNetworkingIsPhysical:
+ def test_returns_false_by_default(self, sys_class_net):
+ assert not LinuxNetworking().is_physical("eth0")
+
+ def test_returns_false_if_devname_exists_but_not_physical(
+ self, sys_class_net
+ ):
+ devname = "eth0"
+ sys_class_net.join(devname).mkdir()
+ assert not LinuxNetworking().is_physical(devname)
+
+ def test_returns_true_if_device_is_physical(self, sys_class_net):
+ devname = "eth0"
+ device_dir = sys_class_net.join(devname)
+ device_dir.mkdir()
+ device_dir.join("device").write("")
+
+ assert LinuxNetworking().is_physical(devname)
+
+
+class TestBSDNetworkingTrySetLinkUp:
+ def test_raises_notimplementederror(self):
+ with pytest.raises(NotImplementedError):
+ BSDNetworking().try_set_link_up("eth0")
+
+
+@mock.patch("cloudinit.net.is_up")
+@mock.patch("cloudinit.distros.networking.subp.subp")
+class TestLinuxNetworkingTrySetLinkUp:
+ def test_calls_subp_return_true(self, m_subp, m_is_up):
+ devname = "eth0"
+ m_is_up.return_value = True
+ is_success = LinuxNetworking().try_set_link_up(devname)
+
+ assert (
+ mock.call(["ip", "link", "set", devname, "up"])
+ == m_subp.call_args_list[-1]
+ )
+ assert is_success
+
+ def test_calls_subp_return_false(self, m_subp, m_is_up):
+ devname = "eth0"
+ m_is_up.return_value = False
+ is_success = LinuxNetworking().try_set_link_up(devname)
+
+ assert (
+ mock.call(["ip", "link", "set", devname, "up"])
+ == m_subp.call_args_list[-1]
+ )
+ assert not is_success
+
+
+class TestBSDNetworkingSettle:
+ def test_settle_doesnt_error(self):
+ # This also implicitly tests that it doesn't use subp.subp
+ BSDNetworking().settle()
+
+
+@pytest.mark.usefixtures("sys_class_net")
+@mock.patch("cloudinit.distros.networking.util.udevadm_settle", autospec=True)
+class TestLinuxNetworkingSettle:
+ def test_no_arguments(self, m_udevadm_settle):
+ LinuxNetworking().settle()
+
+ assert [mock.call(exists=None)] == m_udevadm_settle.call_args_list
+
+ def test_exists_argument(self, m_udevadm_settle):
+ LinuxNetworking().settle(exists="ens3")
+
+ expected_path = net.sys_dev_path("ens3")
+ assert [
+ mock.call(exists=expected_path)
+ ] == m_udevadm_settle.call_args_list
+
+
+class TestNetworkingWaitForPhysDevs:
+ @pytest.fixture
+ def wait_for_physdevs_netcfg(self):
+ """This config is shared across all the tests in this class."""
+
+ def ethernet(mac, name, driver=None, device_id=None):
+ v2_cfg = {"set-name": name, "match": {"macaddress": mac}}
+ if driver:
+ v2_cfg["match"].update({"driver": driver})
+ if device_id:
+ v2_cfg["match"].update({"device_id": device_id})
+
+ return v2_cfg
+
+ physdevs = [
+ ["aa:bb:cc:dd:ee:ff", "eth0", "virtio", "0x1000"],
+ ["00:11:22:33:44:55", "ens3", "e1000", "0x1643"],
+ ]
+ netcfg = {
+ "version": 2,
+ "ethernets": {args[1]: ethernet(*args) for args in physdevs},
+ }
+ return netcfg
+
+ def test_skips_settle_if_all_present(
+ self,
+ generic_networking_cls,
+ wait_for_physdevs_netcfg,
+ ):
+ networking = generic_networking_cls()
+ with mock.patch.object(
+ networking, "get_interfaces_by_mac"
+ ) as m_get_interfaces_by_mac:
+ m_get_interfaces_by_mac.side_effect = iter(
+ [{"aa:bb:cc:dd:ee:ff": "eth0", "00:11:22:33:44:55": "ens3"}]
+ )
+ with mock.patch.object(
+ networking, "settle", autospec=True
+ ) as m_settle:
+ networking.wait_for_physdevs(wait_for_physdevs_netcfg)
+ assert 0 == m_settle.call_count
+
+ def test_calls_udev_settle_on_missing(
+ self,
+ generic_networking_cls,
+ wait_for_physdevs_netcfg,
+ ):
+ networking = generic_networking_cls()
+ with mock.patch.object(
+ networking, "get_interfaces_by_mac"
+ ) as m_get_interfaces_by_mac:
+ m_get_interfaces_by_mac.side_effect = iter(
+ [
+ {
+ "aa:bb:cc:dd:ee:ff": "eth0"
+ }, # first call ens3 is missing
+ {
+ "aa:bb:cc:dd:ee:ff": "eth0",
+ "00:11:22:33:44:55": "ens3",
+ }, # second call has both
+ ]
+ )
+ with mock.patch.object(
+ networking, "settle", autospec=True
+ ) as m_settle:
+ networking.wait_for_physdevs(wait_for_physdevs_netcfg)
+ m_settle.assert_called_with(exists="ens3")
+
+ @pytest.mark.parametrize(
+ "strict,expectation",
+ [(True, pytest.raises(RuntimeError)), (False, does_not_raise())],
+ )
+ def test_retrying_and_strict_behaviour(
+ self,
+ strict,
+ expectation,
+ generic_networking_cls,
+ wait_for_physdevs_netcfg,
+ ):
+ networking = generic_networking_cls()
+ with mock.patch.object(
+ networking, "get_interfaces_by_mac"
+ ) as m_get_interfaces_by_mac:
+ m_get_interfaces_by_mac.return_value = {}
+
+ with mock.patch.object(
+ networking, "settle", autospec=True
+ ) as m_settle:
+ with expectation:
+ networking.wait_for_physdevs(
+ wait_for_physdevs_netcfg, strict=strict
+ )
+
+ assert (
+ 5 * len(wait_for_physdevs_netcfg["ethernets"])
+ == m_settle.call_count
+ )
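
The strict/non-strict case above leans on the `ExitStack`-as-`does_not_raise` idiom from the pytest documentation linked at the top of the file: the expected context manager is itself a parameter, so one test body covers both the raising and the non-raising branch. A small sketch of the pattern, with a hypothetical function standing in for the code under test:

    from contextlib import ExitStack as does_not_raise

    import pytest


    def wait_or_give_up(found, strict):
        # hypothetical stand-in for wait_for_physdevs' strict behaviour
        if not found and strict:
            raise RuntimeError("expected devices never appeared")


    @pytest.mark.parametrize(
        "strict,expectation",
        [(True, pytest.raises(RuntimeError)), (False, does_not_raise())],
    )
    def test_wait_or_give_up(strict, expectation):
        with expectation:
            wait_or_give_up(found=False, strict=strict)
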
diff --git a/tests/unittests/test_distros/test_opensuse.py b/tests/unittests/distros/test_opensuse.py
index b9bb9b3e..4a4b266f 100644
--- a/tests/unittests/test_distros/test_opensuse.py
+++ b/tests/unittests/distros/test_opensuse.py
@@ -1,12 +1,11 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.tests.helpers import CiTestCase
+from tests.unittests.helpers import CiTestCase
from . import _get_distro
class TestopenSUSE(CiTestCase):
-
def test_get_distro(self):
distro = _get_distro("opensuse")
- self.assertEqual(distro.osfamily, 'suse')
+ self.assertEqual(distro.osfamily, "suse")
diff --git a/tests/unittests/distros/test_photon.py b/tests/unittests/distros/test_photon.py
new file mode 100644
index 00000000..fed30c2b
--- /dev/null
+++ b/tests/unittests/distros/test_photon.py
@@ -0,0 +1,68 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import util
+from tests.unittests.helpers import CiTestCase, mock
+
+from . import _get_distro
+
+SYSTEM_INFO = {
+ "paths": {
+ "cloud_dir": "/var/lib/cloud/",
+ "templates_dir": "/etc/cloud/templates/",
+ },
+ "network": {"renderers": "networkd"},
+}
+
+
+class TestPhoton(CiTestCase):
+ with_logs = True
+ distro = _get_distro("photon", SYSTEM_INFO)
+ expected_log_line = "Rely on PhotonOS default network config"
+
+ def test_network_renderer(self):
+ self.assertEqual(self.distro._cfg["network"]["renderers"], "networkd")
+
+ def test_get_distro(self):
+ self.assertEqual(self.distro.osfamily, "photon")
+
+ @mock.patch("cloudinit.distros.photon.subp.subp")
+ def test_write_hostname(self, m_subp):
+ hostname = "myhostname"
+ hostfile = self.tmp_path("previous-hostname")
+ self.distro._write_hostname(hostname, hostfile)
+ self.assertEqual(hostname, util.load_file(hostfile))
+
+ ret = self.distro._read_hostname(hostfile)
+ self.assertEqual(ret, hostname)
+
+ m_subp.return_value = (None, None)
+ hostfile += "hostfile"
+ self.distro._write_hostname(hostname, hostfile)
+
+ m_subp.return_value = (hostname, None)
+ ret = self.distro._read_hostname(hostfile)
+ self.assertEqual(ret, hostname)
+
+ self.logs.truncate(0)
+ m_subp.return_value = (None, "bla")
+ self.distro._write_hostname(hostname, None)
+ self.assertIn("Error while setting hostname", self.logs.getvalue())
+
+ @mock.patch("cloudinit.net.generate_fallback_config")
+ def test_fallback_netcfg(self, m_fallback_cfg):
+
+ key = "disable_fallback_netcfg"
+ # Don't use fallback if no setting given
+ self.logs.truncate(0)
+ assert self.distro.generate_fallback_config() is None
+ self.assertIn(self.expected_log_line, self.logs.getvalue())
+
+ self.logs.truncate(0)
+ self.distro._cfg[key] = True
+ assert self.distro.generate_fallback_config() is None
+ self.assertIn(self.expected_log_line, self.logs.getvalue())
+
+ self.logs.truncate(0)
+ self.distro._cfg[key] = False
+ assert self.distro.generate_fallback_config() is not None
+ self.assertNotIn(self.expected_log_line, self.logs.getvalue())
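
The fallback test above pins down the contract of Photon's `disable_fallback_netcfg` setting: an absent key and an explicit `True` both mean "rely on the OS defaults" (no fallback config, and the log line is emitted), while only an explicit `False` lets cloud-init generate a fallback. A hedged sketch of the decision logic implied by those assertions (not the actual distro implementation):

    def fallback_config(distro_cfg, generated):
        """Return a generated fallback config only when explicitly enabled."""
        if distro_cfg.get("disable_fallback_netcfg", True):
            # Absent key or True: keep PhotonOS defaults, no fallback config.
            print("Rely on PhotonOS default network config")
            return None
        return generated


    assert fallback_config({}, {"version": 2}) is None
    assert fallback_config({"disable_fallback_netcfg": True}, {"version": 2}) is None
    assert fallback_config({"disable_fallback_netcfg": False}, {"version": 2}) == {
        "version": 2
    }
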
diff --git a/tests/unittests/test_distros/test_resolv.py b/tests/unittests/distros/test_resolv.py
index 7d940750..65e78101 100644
--- a/tests/unittests/test_distros/test_resolv.py
+++ b/tests/unittests/distros/test_resolv.py
@@ -1,18 +1,16 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.distros.parsers import resolv_conf
-
-from cloudinit.tests.helpers import TestCase
-
import re
+from cloudinit.distros.parsers import resolv_conf
+from tests.unittests.helpers import TestCase
-BASE_RESOLVE = '''
+BASE_RESOLVE = """
; generated by /sbin/dhclient-script
search blah.yahoo.com yahoo.com
nameserver 10.15.44.14
nameserver 10.15.30.92
-'''
+"""
BASE_RESOLVE = BASE_RESOLVE.strip()
@@ -27,39 +25,40 @@ class TestResolvHelper(TestCase):
self.assertIsNone(rp.local_domain)
rp.local_domain = "bob"
- self.assertEqual('bob', rp.local_domain)
- self.assertIn('domain bob', str(rp))
+ self.assertEqual("bob", rp.local_domain)
+ self.assertIn("domain bob", str(rp))
def test_nameservers(self):
rp = resolv_conf.ResolvConf(BASE_RESOLVE)
- self.assertIn('10.15.44.14', rp.nameservers)
- self.assertIn('10.15.30.92', rp.nameservers)
- rp.add_nameserver('10.2')
- self.assertIn('10.2', rp.nameservers)
- self.assertIn('nameserver 10.2', str(rp))
- self.assertNotIn('10.3', rp.nameservers)
+ self.assertIn("10.15.44.14", rp.nameservers)
+ self.assertIn("10.15.30.92", rp.nameservers)
+ rp.add_nameserver("10.2")
+ self.assertIn("10.2", rp.nameservers)
+ self.assertIn("nameserver 10.2", str(rp))
+ self.assertNotIn("10.3", rp.nameservers)
self.assertEqual(len(rp.nameservers), 3)
- rp.add_nameserver('10.2')
- rp.add_nameserver('10.3')
- self.assertNotIn('10.3', rp.nameservers)
+ rp.add_nameserver("10.2")
+ rp.add_nameserver("10.3")
+ self.assertNotIn("10.3", rp.nameservers)
def test_search_domains(self):
rp = resolv_conf.ResolvConf(BASE_RESOLVE)
- self.assertIn('yahoo.com', rp.search_domains)
- self.assertIn('blah.yahoo.com', rp.search_domains)
- rp.add_search_domain('bbb.y.com')
- self.assertIn('bbb.y.com', rp.search_domains)
- self.assertTrue(re.search(r'search(.*)bbb.y.com(.*)', str(rp)))
- self.assertIn('bbb.y.com', rp.search_domains)
- rp.add_search_domain('bbb.y.com')
+ self.assertIn("yahoo.com", rp.search_domains)
+ self.assertIn("blah.yahoo.com", rp.search_domains)
+ rp.add_search_domain("bbb.y.com")
+ self.assertIn("bbb.y.com", rp.search_domains)
+ self.assertTrue(re.search(r"search(.*)bbb.y.com(.*)", str(rp)))
+ self.assertIn("bbb.y.com", rp.search_domains)
+ rp.add_search_domain("bbb.y.com")
self.assertEqual(len(rp.search_domains), 3)
- rp.add_search_domain('bbb2.y.com')
+ rp.add_search_domain("bbb2.y.com")
self.assertEqual(len(rp.search_domains), 4)
- rp.add_search_domain('bbb3.y.com')
+ rp.add_search_domain("bbb3.y.com")
self.assertEqual(len(rp.search_domains), 5)
- rp.add_search_domain('bbb4.y.com')
+ rp.add_search_domain("bbb4.y.com")
self.assertEqual(len(rp.search_domains), 6)
- self.assertRaises(ValueError, rp.add_search_domain, 'bbb5.y.com')
+ self.assertRaises(ValueError, rp.add_search_domain, "bbb5.y.com")
self.assertEqual(len(rp.search_domains), 6)
+
# vi: ts=4 expandtab
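
The resolv.conf tests above exercise a small parser API: nameservers and search domains can be added programmatically, the rendered file is available via `str()`, and adding a seventh search domain raises `ValueError` (the classic resolver limit of six). A short usage sketch restricted to the behaviour those assertions demonstrate:

    from cloudinit.distros.parsers import resolv_conf

    rp = resolv_conf.ResolvConf("search a.example.com\nnameserver 10.0.0.1\n")
    rp.add_nameserver("10.0.0.2")
    rp.local_domain = "internal"
    rendered = str(rp)
    assert "nameserver 10.0.0.2" in rendered
    assert "domain internal" in rendered

    # One search domain exists already; five more reach the cap of six.
    for i in range(5):
        rp.add_search_domain("d%d.example.com" % i)
    try:
        rp.add_search_domain("one-too-many.example.com")
    except ValueError:
        pass  # the seventh search domain is rejected
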
diff --git a/tests/unittests/test_distros/test_sles.py b/tests/unittests/distros/test_sles.py
index 33e3c457..66b8b13d 100644
--- a/tests/unittests/test_distros/test_sles.py
+++ b/tests/unittests/distros/test_sles.py
@@ -1,12 +1,11 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.tests.helpers import CiTestCase
+from tests.unittests.helpers import CiTestCase
from . import _get_distro
class TestSLES(CiTestCase):
-
def test_get_distro(self):
distro = _get_distro("sles")
- self.assertEqual(distro.osfamily, 'suse')
+ self.assertEqual(distro.osfamily, "suse")
diff --git a/tests/unittests/test_distros/test_sysconfig.py b/tests/unittests/distros/test_sysconfig.py
index c1d5b693..d0979e17 100644
--- a/tests/unittests/test_distros/test_sysconfig.py
+++ b/tests/unittests/distros/test_sysconfig.py
@@ -3,22 +3,23 @@
import re
from cloudinit.distros.parsers.sys_conf import SysConf
-
-from cloudinit.tests.helpers import TestCase
-
+from tests.unittests.helpers import TestCase
# Lots of good examples @
# http://content.hccfl.edu/pollock/AUnix1/SysconfigFilesDesc.txt
+
class TestSysConfHelper(TestCase):
# This function was added in 2.7, make it work for 2.6
def assertRegMatches(self, text, regexp):
regexp = re.compile(regexp)
- self.assertTrue(regexp.search(text),
- msg="%s must match %s!" % (text, regexp.pattern))
+ self.assertTrue(
+ regexp.search(text),
+ msg="%s must match %s!" % (text, regexp.pattern),
+ )
def test_parse_no_change(self):
- contents = '''# A comment
+ contents = """# A comment
USESMBAUTH=no
KEYTABLE=/usr/lib/kbd/keytables/us.map
SHORTDATE=$(date +%y:%m:%d:%H:%M)
@@ -28,59 +29,64 @@ NETMASK0=255.255.255.0
LIST=$LOGROOT/incremental-list
IPV6TO4_ROUTING='eth0-:0004::1/64 eth1-:0005::1/64'
ETHTOOL_OPTS="-K ${DEVICE} tso on; -G ${DEVICE} rx 256 tx 256"
-USEMD5=no'''
+USEMD5=no"""
conf = SysConf(contents.splitlines())
- self.assertEqual(conf['HOSTNAME'], 'blahblah')
- self.assertEqual(conf['SHORTDATE'], '$(date +%y:%m:%d:%H:%M)')
+ self.assertEqual(conf["HOSTNAME"], "blahblah")
+ self.assertEqual(conf["SHORTDATE"], "$(date +%y:%m:%d:%H:%M)")
# Should be unquoted
- self.assertEqual(conf['ETHTOOL_OPTS'], ('-K ${DEVICE} tso on; '
- '-G ${DEVICE} rx 256 tx 256'))
+ self.assertEqual(
+ conf["ETHTOOL_OPTS"],
+ "-K ${DEVICE} tso on; -G ${DEVICE} rx 256 tx 256",
+ )
self.assertEqual(contents, str(conf))
def test_parse_shell_vars(self):
- contents = 'USESMBAUTH=$XYZ'
+ contents = "USESMBAUTH=$XYZ"
conf = SysConf(contents.splitlines())
self.assertEqual(contents, str(conf))
- conf = SysConf('')
- conf['B'] = '${ZZ}d apples'
+ conf = SysConf("")
+ conf["B"] = "${ZZ}d apples"
# Should be quoted
self.assertEqual('B="${ZZ}d apples"', str(conf))
- conf = SysConf('')
- conf['B'] = '$? d apples'
+ conf = SysConf("")
+ conf["B"] = "$? d apples"
self.assertEqual('B="$? d apples"', str(conf))
contents = 'IPMI_WATCHDOG_OPTIONS="timeout=60"'
conf = SysConf(contents.splitlines())
- self.assertEqual('IPMI_WATCHDOG_OPTIONS=timeout=60', str(conf))
+ self.assertEqual("IPMI_WATCHDOG_OPTIONS=timeout=60", str(conf))
def test_parse_adjust(self):
contents = 'IPV6TO4_ROUTING="eth0-:0004::1/64 eth1-:0005::1/64"'
conf = SysConf(contents.splitlines())
# Should be unquoted
- self.assertEqual('eth0-:0004::1/64 eth1-:0005::1/64',
- conf['IPV6TO4_ROUTING'])
- conf['IPV6TO4_ROUTING'] = "blah \tblah"
+ self.assertEqual(
+ "eth0-:0004::1/64 eth1-:0005::1/64", conf["IPV6TO4_ROUTING"]
+ )
+ conf["IPV6TO4_ROUTING"] = "blah \tblah"
contents2 = str(conf).strip()
# Should be requoted due to whitespace
- self.assertRegMatches(contents2,
- r'IPV6TO4_ROUTING=[\']blah\s+blah[\']')
+ self.assertRegMatches(
+ contents2, r"IPV6TO4_ROUTING=[\']blah\s+blah[\']"
+ )
def test_parse_no_adjust_shell(self):
- conf = SysConf(''.splitlines())
- conf['B'] = ' $(time)'
+ conf = SysConf("".splitlines())
+ conf["B"] = " $(time)"
contents = str(conf)
- self.assertEqual('B= $(time)', contents)
+ self.assertEqual("B= $(time)", contents)
def test_parse_empty(self):
- contents = ''
+ contents = ""
conf = SysConf(contents.splitlines())
- self.assertEqual('', str(conf).strip())
+ self.assertEqual("", str(conf).strip())
def test_parse_add_new(self):
- contents = 'BLAH=b'
+ contents = "BLAH=b"
conf = SysConf(contents.splitlines())
- conf['Z'] = 'd'
+ conf["Z"] = "d"
contents = str(conf)
self.assertIn("Z=d", contents)
self.assertIn("BLAH=b", contents)
+
# vi: ts=4 expandtab
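
The SysConf tests above pin down its quoting behaviour: simple values are written bare, values containing shell expansions or whitespace are quoted on output, and unnecessary quotes are dropped when a file is re-rendered. A short usage sketch limited to what those assertions show:

    from cloudinit.distros.parsers.sys_conf import SysConf

    conf = SysConf("BLAH=b".splitlines())
    conf["Z"] = "d"                  # simple value: rendered unquoted
    conf["B"] = "${ZZ}d apples"      # shell var plus space: rendered quoted
    rendered = str(conf)
    assert "BLAH=b" in rendered
    assert "Z=d" in rendered
    assert 'B="${ZZ}d apples"' in rendered
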
diff --git a/tests/unittests/distros/test_user_data_normalize.py b/tests/unittests/distros/test_user_data_normalize.py
new file mode 100644
index 00000000..67ea024b
--- /dev/null
+++ b/tests/unittests/distros/test_user_data_normalize.py
@@ -0,0 +1,365 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from unittest import mock
+
+from cloudinit import distros, helpers, settings
+from cloudinit.distros import ug_util
+from tests.unittests.helpers import TestCase
+
+bcfg = {
+ "name": "bob",
+ "plain_text_passwd": "ubuntu",
+ "home": "/home/ubuntu",
+ "shell": "/bin/bash",
+ "lock_passwd": True,
+ "gecos": "Ubuntu",
+ "groups": ["foo"],
+}
+
+
+class TestUGNormalize(TestCase):
+ def setUp(self):
+ super(TestUGNormalize, self).setUp()
+ self.add_patch("cloudinit.util.system_is_snappy", "m_snappy")
+
+ def _make_distro(self, dtype, def_user=None):
+ cfg = dict(settings.CFG_BUILTIN)
+ cfg["system_info"]["distro"] = dtype
+ paths = helpers.Paths(cfg["system_info"]["paths"])
+ distro_cls = distros.fetch(dtype)
+ if def_user:
+ cfg["system_info"]["default_user"] = def_user.copy()
+ distro = distro_cls(dtype, cfg["system_info"], paths)
+ return distro
+
+ def _norm(self, cfg, distro):
+ return ug_util.normalize_users_groups(cfg, distro)
+
+ def test_group_dict(self):
+ distro = self._make_distro("ubuntu")
+ g = {
+ "groups": [
+ {"ubuntu": ["foo", "bar"], "bob": "users"},
+ "cloud-users",
+ {"bob": "users2"},
+ ]
+ }
+ (_users, groups) = self._norm(g, distro)
+ self.assertIn("ubuntu", groups)
+ ub_members = groups["ubuntu"]
+ self.assertEqual(sorted(["foo", "bar"]), sorted(ub_members))
+ self.assertIn("bob", groups)
+ b_members = groups["bob"]
+ self.assertEqual(sorted(["users", "users2"]), sorted(b_members))
+
+ def test_basic_groups(self):
+ distro = self._make_distro("ubuntu")
+ ug_cfg = {
+ "groups": ["bob"],
+ }
+ (users, groups) = self._norm(ug_cfg, distro)
+ self.assertIn("bob", groups)
+ self.assertEqual({}, users)
+
+ def test_csv_groups(self):
+ distro = self._make_distro("ubuntu")
+ ug_cfg = {
+ "groups": "bob,joe,steve",
+ }
+ (users, groups) = self._norm(ug_cfg, distro)
+ self.assertIn("bob", groups)
+ self.assertIn("joe", groups)
+ self.assertIn("steve", groups)
+ self.assertEqual({}, users)
+
+ def test_more_groups(self):
+ distro = self._make_distro("ubuntu")
+ ug_cfg = {"groups": ["bob", "joe", "steve"]}
+ (users, groups) = self._norm(ug_cfg, distro)
+ self.assertIn("bob", groups)
+ self.assertIn("joe", groups)
+ self.assertIn("steve", groups)
+ self.assertEqual({}, users)
+
+ def test_member_groups(self):
+ distro = self._make_distro("ubuntu")
+ ug_cfg = {
+ "groups": {
+ "bob": ["s"],
+ "joe": [],
+ "steve": [],
+ }
+ }
+ (users, groups) = self._norm(ug_cfg, distro)
+ self.assertIn("bob", groups)
+ self.assertEqual(["s"], groups["bob"])
+ self.assertEqual([], groups["joe"])
+ self.assertIn("joe", groups)
+ self.assertIn("steve", groups)
+ self.assertEqual({}, users)
+
+ def test_users_simple_dict(self):
+ distro = self._make_distro("ubuntu", bcfg)
+ ug_cfg = {
+ "users": {
+ "default": True,
+ }
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn("bob", users)
+ ug_cfg = {
+ "users": {
+ "default": "yes",
+ }
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn("bob", users)
+ ug_cfg = {
+ "users": {
+ "default": "1",
+ }
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn("bob", users)
+
+ def test_users_simple_dict_no(self):
+ distro = self._make_distro("ubuntu", bcfg)
+ ug_cfg = {
+ "users": {
+ "default": False,
+ }
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertEqual({}, users)
+ ug_cfg = {
+ "users": {
+ "default": "no",
+ }
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertEqual({}, users)
+
+ def test_users_simple_csv(self):
+ distro = self._make_distro("ubuntu")
+ ug_cfg = {
+ "users": "joe,bob",
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn("joe", users)
+ self.assertIn("bob", users)
+ self.assertEqual({"default": False}, users["joe"])
+ self.assertEqual({"default": False}, users["bob"])
+
+ def test_users_simple(self):
+ distro = self._make_distro("ubuntu")
+ ug_cfg = {
+ "users": ["joe", "bob"],
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn("joe", users)
+ self.assertIn("bob", users)
+ self.assertEqual({"default": False}, users["joe"])
+ self.assertEqual({"default": False}, users["bob"])
+
+ def test_users_old_user(self):
+ distro = self._make_distro("ubuntu", bcfg)
+ ug_cfg = {"user": "zetta", "users": "default"}
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertNotIn("bob", users) # Bob is not the default now, zetta is
+ self.assertIn("zetta", users)
+ self.assertTrue(users["zetta"]["default"])
+ self.assertNotIn("default", users)
+ ug_cfg = {"user": "zetta", "users": "default, joe"}
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertNotIn("bob", users) # Bob is not the default now, zetta is
+ self.assertIn("joe", users)
+ self.assertIn("zetta", users)
+ self.assertTrue(users["zetta"]["default"])
+ self.assertNotIn("default", users)
+ ug_cfg = {"user": "zetta", "users": ["bob", "joe"]}
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn("bob", users)
+ self.assertIn("joe", users)
+ self.assertIn("zetta", users)
+ self.assertTrue(users["zetta"]["default"])
+ ug_cfg = {
+ "user": "zetta",
+ "users": {
+ "bob": True,
+ "joe": True,
+ },
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn("bob", users)
+ self.assertIn("joe", users)
+ self.assertIn("zetta", users)
+ self.assertTrue(users["zetta"]["default"])
+ ug_cfg = {
+ "user": "zetta",
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn("zetta", users)
+ ug_cfg = {}
+ (users, groups) = self._norm(ug_cfg, distro)
+ self.assertEqual({}, users)
+ self.assertEqual({}, groups)
+
+ def test_users_dict_default_additional(self):
+ distro = self._make_distro("ubuntu", bcfg)
+ ug_cfg = {
+ "users": [{"name": "default", "blah": True}],
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn("bob", users)
+ self.assertEqual(
+ ",".join(distro.get_default_user()["groups"]),
+ users["bob"]["groups"],
+ )
+ self.assertEqual(True, users["bob"]["blah"])
+ self.assertEqual(True, users["bob"]["default"])
+
+ def test_users_dict_extract(self):
+ distro = self._make_distro("ubuntu", bcfg)
+ ug_cfg = {
+ "users": [
+ "default",
+ ],
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn("bob", users)
+ (name, config) = ug_util.extract_default(users)
+ self.assertEqual(name, "bob")
+ expected_config = {}
+ def_config = None
+ try:
+ def_config = distro.get_default_user()
+ except NotImplementedError:
+ pass
+ if not def_config:
+ def_config = {}
+ expected_config.update(def_config)
+
+ # Ignore these for now
+ expected_config.pop("name", None)
+ expected_config.pop("groups", None)
+ config.pop("groups", None)
+ self.assertEqual(config, expected_config)
+
+ def test_users_dict_default(self):
+ distro = self._make_distro("ubuntu", bcfg)
+ ug_cfg = {
+ "users": [
+ "default",
+ ],
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn("bob", users)
+ self.assertEqual(
+ ",".join(distro.get_default_user()["groups"]),
+ users["bob"]["groups"],
+ )
+ self.assertEqual(True, users["bob"]["default"])
+
+ def test_users_dict_trans(self):
+ distro = self._make_distro("ubuntu")
+ ug_cfg = {
+ "users": [
+ {"name": "joe", "tr-me": True},
+ {"name": "bob"},
+ ],
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn("joe", users)
+ self.assertIn("bob", users)
+ self.assertEqual({"tr_me": True, "default": False}, users["joe"])
+ self.assertEqual({"default": False}, users["bob"])
+
+ def test_users_dict(self):
+ distro = self._make_distro("ubuntu")
+ ug_cfg = {
+ "users": [
+ {"name": "joe"},
+ {"name": "bob"},
+ ],
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ self.assertIn("joe", users)
+ self.assertIn("bob", users)
+ self.assertEqual({"default": False}, users["joe"])
+ self.assertEqual({"default": False}, users["bob"])
+
+ @mock.patch("cloudinit.subp.subp")
+ def test_create_snap_user(self, mock_subp):
+ mock_subp.side_effect = [
+ ('{"username": "joe", "ssh-key-count": 1}\n', "")
+ ]
+ distro = self._make_distro("ubuntu")
+ ug_cfg = {
+ "users": [
+ {"name": "joe", "snapuser": "joe@joe.com"},
+ ],
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ for (user, config) in users.items():
+ print("user=%s config=%s" % (user, config))
+ username = distro.create_user(user, **config)
+
+ snapcmd = ["snap", "create-user", "--sudoer", "--json", "joe@joe.com"]
+ mock_subp.assert_called_with(snapcmd, capture=True, logstring=snapcmd)
+ self.assertEqual(username, "joe")
+
+ @mock.patch("cloudinit.subp.subp")
+ def test_create_snap_user_known(self, mock_subp):
+ mock_subp.side_effect = [
+ ('{"username": "joe", "ssh-key-count": 1}\n', "")
+ ]
+ distro = self._make_distro("ubuntu")
+ ug_cfg = {
+ "users": [
+ {"name": "joe", "snapuser": "joe@joe.com", "known": True},
+ ],
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ for (user, config) in users.items():
+ print("user=%s config=%s" % (user, config))
+ username = distro.create_user(user, **config)
+
+ snapcmd = [
+ "snap",
+ "create-user",
+ "--sudoer",
+ "--json",
+ "--known",
+ "joe@joe.com",
+ ]
+ mock_subp.assert_called_with(snapcmd, capture=True, logstring=snapcmd)
+ self.assertEqual(username, "joe")
+
+ @mock.patch("cloudinit.util.system_is_snappy")
+ @mock.patch("cloudinit.util.is_group")
+ @mock.patch("cloudinit.subp.subp")
+ def test_add_user_on_snappy_system(
+ self, mock_subp, mock_isgrp, mock_snappy
+ ):
+ mock_isgrp.return_value = False
+ mock_subp.return_value = True
+ mock_snappy.return_value = True
+ distro = self._make_distro("ubuntu")
+ ug_cfg = {
+ "users": [
+ {"name": "joe", "groups": "users", "create_groups": True},
+ ],
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ for (user, config) in users.items():
+ print("user=%s config=%s" % (user, config))
+ distro.add_user(user, **config)
+
+ groupcmd = ["groupadd", "users", "--extrausers"]
+ addcmd = ["useradd", "joe", "--extrausers", "--groups", "users", "-m"]
+
+ mock_subp.assert_any_call(groupcmd)
+ mock_subp.assert_any_call(addcmd, logstring=addcmd)
+
+
+# vi: ts=4 expandtab
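
Every test above funnels through `ug_util.normalize_users_groups`, which flattens the several accepted spellings of users and groups (single string, CSV string, list, dict) into two plain dicts. A condensed sketch using the same setup the tests build in `_make_distro`:

    from cloudinit import distros, helpers, settings
    from cloudinit.distros import ug_util

    cfg = dict(settings.CFG_BUILTIN)
    cfg["system_info"]["distro"] = "ubuntu"
    paths = helpers.Paths(cfg["system_info"]["paths"])
    distro = distros.fetch("ubuntu")("ubuntu", cfg["system_info"], paths)

    users, groups = ug_util.normalize_users_groups(
        {"users": "joe,bob", "groups": ["admins", {"devs": ["joe"]}]}, distro
    )
    assert users["joe"] == {"default": False}  # CSV string becomes per-user dicts
    assert users["bob"] == {"default": False}
    assert "admins" in groups
    assert groups["devs"] == ["joe"]
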
diff --git a/tests/unittests/test_datasource/__init__.py b/tests/unittests/filters/__init__.py
index e69de29b..e69de29b 100644
--- a/tests/unittests/test_datasource/__init__.py
+++ b/tests/unittests/filters/__init__.py
diff --git a/tests/unittests/test_filters/test_launch_index.py b/tests/unittests/filters/test_launch_index.py
index 1492361e..679bdfc3 100644
--- a/tests/unittests/test_filters/test_launch_index.py
+++ b/tests/unittests/filters/test_launch_index.py
@@ -3,11 +3,10 @@
import copy
from itertools import filterfalse
-from cloudinit.tests import helpers
-
-from cloudinit.filters import launch_index
from cloudinit import user_data as ud
from cloudinit import util
+from cloudinit.filters import launch_index
+from tests.unittests import helpers
def count_messages(root):
@@ -20,7 +19,6 @@ def count_messages(root):
class TestLaunchFilter(helpers.ResourceUsingTestCase):
-
def assertCounts(self, message, expected_counts):
orig_message = copy.deepcopy(message)
for (index, count) in expected_counts.items():
@@ -54,7 +52,7 @@ class TestLaunchFilter(helpers.ResourceUsingTestCase):
return True
def testMultiEmailIndex(self):
- test_data = helpers.readResource('filter_cloud_multipart_2.email')
+ test_data = helpers.readResource("filter_cloud_multipart_2.email")
ud_proc = ud.UserDataProcessor(self.getCloudPaths())
message = ud_proc.process(test_data)
self.assertTrue(count_messages(message) > 0)
@@ -69,7 +67,7 @@ class TestLaunchFilter(helpers.ResourceUsingTestCase):
self.assertCounts(message, expected_counts)
def testHeaderEmailIndex(self):
- test_data = helpers.readResource('filter_cloud_multipart_header.email')
+ test_data = helpers.readResource("filter_cloud_multipart_header.email")
ud_proc = ud.UserDataProcessor(self.getCloudPaths())
message = ud_proc.process(test_data)
self.assertTrue(count_messages(message) > 0)
@@ -78,13 +76,13 @@ class TestLaunchFilter(helpers.ResourceUsingTestCase):
expected_counts = {
5: 1,
-1: 0,
- 'c': 1,
+ "c": 1,
None: 1,
}
self.assertCounts(message, expected_counts)
def testConfigEmailIndex(self):
- test_data = helpers.readResource('filter_cloud_multipart_1.email')
+ test_data = helpers.readResource("filter_cloud_multipart_1.email")
ud_proc = ud.UserDataProcessor(self.getCloudPaths())
message = ud_proc.process(test_data)
self.assertTrue(count_messages(message) > 0)
@@ -98,7 +96,7 @@ class TestLaunchFilter(helpers.ResourceUsingTestCase):
self.assertCounts(message, expected_counts)
def testNoneIndex(self):
- test_data = helpers.readResource('filter_cloud_multipart.yaml')
+ test_data = helpers.readResource("filter_cloud_multipart.yaml")
ud_proc = ud.UserDataProcessor(self.getCloudPaths())
message = ud_proc.process(test_data)
start_count = count_messages(message)
@@ -107,7 +105,7 @@ class TestLaunchFilter(helpers.ResourceUsingTestCase):
self.assertTrue(self.equivalentMessage(message, filtered_message))
def testIndexes(self):
- test_data = helpers.readResource('filter_cloud_multipart.yaml')
+ test_data = helpers.readResource("filter_cloud_multipart.yaml")
ud_proc = ud.UserDataProcessor(self.getCloudPaths())
message = ud_proc.process(test_data)
start_count = count_messages(message)
@@ -126,10 +124,11 @@ class TestLaunchFilter(helpers.ResourceUsingTestCase):
# None should just give all back
None: start_count,
# Non ints should be ignored
- 'c': start_count,
+ "c": start_count,
# Strings should be converted
- '1': 2,
+ "1": 2,
}
self.assertCounts(message, expected_counts)
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py
new file mode 100644
index 00000000..67fed8c9
--- /dev/null
+++ b/tests/unittests/helpers.py
@@ -0,0 +1,554 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import functools
+import io
+import logging
+import os
+import random
+import shutil
+import string
+import sys
+import tempfile
+import time
+import unittest
+from contextlib import ExitStack, contextmanager
+from pathlib import Path
+from unittest import mock
+from unittest.util import strclass
+
+import httpretty
+
+import cloudinit
+from cloudinit import cloud, distros
+from cloudinit import helpers as ch
+from cloudinit import subp, util
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ validate_cloudconfig_schema,
+)
+from cloudinit.sources import DataSourceNone
+from cloudinit.templater import JINJA_AVAILABLE
+
+_real_subp = subp.subp
+
+# Used for skipping tests
+SkipTest = unittest.SkipTest
+skipIf = unittest.skipIf
+
+
+# Makes the old path start
+# with new base instead of whatever
+# it previously had
+def rebase_path(old_path, new_base):
+ if old_path.startswith(new_base):
+ # Already handled...
+ return old_path
+ # Retarget the base of that path
+ # to the new base instead of the
+ # old one...
+ path = os.path.join(new_base, old_path.lstrip("/"))
+ path = os.path.abspath(path)
+ return path
+
+
+# Can work on anything that takes a path as arguments
+def retarget_many_wrapper(new_base, am, old_func):
+ def wrapper(*args, **kwds):
+ n_args = list(args)
+ nam = am
+ if am == -1:
+ nam = len(n_args)
+ for i in range(0, nam):
+ path = args[i]
+ # patchOS() wraps various os and os.path functions, however in
+ # Python 3 some of these now accept file-descriptors (integers).
+ # That breaks rebase_path() so in lieu of a better solution, just
+ # don't rebase if we get a fd.
+ if isinstance(path, str):
+ n_args[i] = rebase_path(path, new_base)
+ return old_func(*n_args, **kwds)
+
+ return wrapper
+
+
+class TestCase(unittest.TestCase):
+ def reset_global_state(self):
+ """Reset any global state to its original settings.
+
+ cloudinit caches some values in cloudinit.util. Unit tests that
+ involved those cached paths were then subject to failure if the order
+ of invocation changed (LP: #1703697).
+
+ This function resets any of these global state variables to their
+ initial state.
+
+ In the future this should really be done with some registry that
+ can then be cleaned in a more obvious way.
+ """
+ util.PROC_CMDLINE = None
+ util._DNS_REDIRECT_IP = None
+ util._LSB_RELEASE = {}
+
+ def setUp(self):
+ super(TestCase, self).setUp()
+ self.reset_global_state()
+
+ def shortDescription(self):
+ return strclass(self.__class__) + "." + self._testMethodName
+
+ def add_patch(self, target, attr, *args, **kwargs):
+ """Patches specified target object and sets it as attr on test
+ instance also schedules cleanup"""
+ if "autospec" not in kwargs:
+ kwargs["autospec"] = True
+ m = mock.patch(target, *args, **kwargs)
+ p = m.start()
+ self.addCleanup(m.stop)
+ setattr(self, attr, p)
+
+
+class CiTestCase(TestCase):
+ """This is the preferred test case base class unless user
+ needs other test case classes below."""
+
+ # Subclass overrides for specific test behavior
+ # Whether or not a unit test needs logfile setup
+ with_logs = False
+ allowed_subp = False
+ SUBP_SHELL_TRUE = "shell=true"
+
+ @contextmanager
+ def allow_subp(self, allowed_subp):
+ orig = self.allowed_subp
+ try:
+ self.allowed_subp = allowed_subp
+ yield
+ finally:
+ self.allowed_subp = orig
+
+ def setUp(self):
+ super(CiTestCase, self).setUp()
+ if self.with_logs:
+ # Create a log handler so unit tests can search expected logs.
+ self.logger = logging.getLogger()
+ self.logs = io.StringIO()
+ formatter = logging.Formatter("%(levelname)s: %(message)s")
+ handler = logging.StreamHandler(self.logs)
+ handler.setFormatter(formatter)
+ self.old_handlers = self.logger.handlers
+ self.logger.handlers = [handler]
+ if self.allowed_subp is True:
+ subp.subp = _real_subp
+ else:
+ subp.subp = self._fake_subp
+
+ def _fake_subp(self, *args, **kwargs):
+ if "args" in kwargs:
+ cmd = kwargs["args"]
+ else:
+ if not args:
+ raise TypeError(
+ "subp() missing 1 required positional argument: 'args'"
+ )
+ cmd = args[0]
+
+ if not isinstance(cmd, str):
+ cmd = cmd[0]
+ pass_through = False
+ if not isinstance(self.allowed_subp, (list, bool)):
+ raise TypeError("self.allowed_subp supports list or bool.")
+ if isinstance(self.allowed_subp, bool):
+ pass_through = self.allowed_subp
+ else:
+ pass_through = (cmd in self.allowed_subp) or (
+ self.SUBP_SHELL_TRUE in self.allowed_subp
+ and kwargs.get("shell")
+ )
+ if pass_through:
+ return _real_subp(*args, **kwargs)
+ raise Exception(
+ "called subp. set self.allowed_subp=True to allow\n subp(%s)"
+ % ", ".join(
+ [str(repr(a)) for a in args]
+ + ["%s=%s" % (k, repr(v)) for k, v in kwargs.items()]
+ )
+ )
+
+ def tearDown(self):
+ if self.with_logs:
+ # Remove the handler we setup
+ logging.getLogger().handlers = self.old_handlers
+ logging.getLogger().setLevel(logging.NOTSET)
+ subp.subp = _real_subp
+ super(CiTestCase, self).tearDown()
+
+ def tmp_dir(self, dir=None, cleanup=True):
+ # return a full path to a temporary directory that will be cleaned up.
+ if dir is None:
+ tmpd = tempfile.mkdtemp(prefix="ci-%s." % self.__class__.__name__)
+ else:
+ tmpd = tempfile.mkdtemp(dir=dir)
+ self.addCleanup(
+ functools.partial(shutil.rmtree, tmpd, ignore_errors=True)
+ )
+ return tmpd
+
+ def tmp_path(self, path, dir=None):
+ # return an absolute path to 'path' under dir.
+ # if dir is None, one will be created with tmp_dir()
+ # the file is not created or modified.
+ if dir is None:
+ dir = self.tmp_dir()
+ return os.path.normpath(os.path.abspath(os.path.join(dir, path)))
+
+ def tmp_cloud(self, distro, sys_cfg=None, metadata=None):
+ """Create a cloud with tmp working directory paths.
+
+ @param distro: Name of the distro to attach to the cloud.
+ @param metadata: Optional metadata to set on the datasource.
+
+ @return: The built cloud instance.
+ """
+ self.new_root = self.tmp_dir()
+ if not sys_cfg:
+ sys_cfg = {}
+ tmp_paths = {}
+ for var in ["templates_dir", "run_dir", "cloud_dir"]:
+ tmp_paths[var] = self.tmp_path(var, dir=self.new_root)
+ util.ensure_dir(tmp_paths[var])
+ self.paths = ch.Paths(tmp_paths)
+ cls = distros.fetch(distro)
+ mydist = cls(distro, sys_cfg, self.paths)
+ myds = DataSourceNone.DataSourceNone(sys_cfg, mydist, self.paths)
+ if metadata:
+ myds.metadata.update(metadata)
+ return cloud.Cloud(myds, self.paths, sys_cfg, mydist, None)
+
+ @classmethod
+ def random_string(cls, length=8):
+ """return a random lowercase string with default length of 8"""
+ return "".join(
+ random.choice(string.ascii_lowercase) for _ in range(length)
+ )
+
+
+class ResourceUsingTestCase(CiTestCase):
+ def setUp(self):
+ super(ResourceUsingTestCase, self).setUp()
+ self.resource_path = None
+
+ def getCloudPaths(self, ds=None):
+ tmpdir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, tmpdir)
+ cp = ch.Paths(
+ {"cloud_dir": tmpdir, "templates_dir": resourceLocation()}, ds=ds
+ )
+ return cp
+
+
+class FilesystemMockingTestCase(ResourceUsingTestCase):
+ def setUp(self):
+ super(FilesystemMockingTestCase, self).setUp()
+ self.patched_funcs = ExitStack()
+
+ def tearDown(self):
+ self.patched_funcs.close()
+ ResourceUsingTestCase.tearDown(self)
+
+ def replicateTestRoot(self, example_root, target_root):
+ real_root = resourceLocation()
+ real_root = os.path.join(real_root, "roots", example_root)
+ for (dir_path, _dirnames, filenames) in os.walk(real_root):
+ real_path = dir_path
+ make_path = rebase_path(real_path[len(real_root) :], target_root)
+ util.ensure_dir(make_path)
+ for f in filenames:
+ real_path = util.abs_join(real_path, f)
+ make_path = util.abs_join(make_path, f)
+ shutil.copy(real_path, make_path)
+
+ def patchUtils(self, new_root):
+ patch_funcs = {
+ util: [
+ ("write_file", 1),
+ ("append_file", 1),
+ ("load_file", 1),
+ ("ensure_dir", 1),
+ ("chmod", 1),
+ ("delete_dir_contents", 1),
+ ("del_file", 1),
+ ("sym_link", -1),
+ ("copy", -1),
+ ],
+ }
+ for (mod, funcs) in patch_funcs.items():
+ for (f, am) in funcs:
+ func = getattr(mod, f)
+ trap_func = retarget_many_wrapper(new_root, am, func)
+ self.patched_funcs.enter_context(
+ mock.patch.object(mod, f, trap_func)
+ )
+
+ # Handle subprocess calls
+ func = getattr(subp, "subp")
+
+ def nsubp(*_args, **_kwargs):
+ return ("", "")
+
+ self.patched_funcs.enter_context(
+ mock.patch.object(subp, "subp", nsubp)
+ )
+
+ def null_func(*_args, **_kwargs):
+ return None
+
+ for f in ["chownbyid", "chownbyname"]:
+ self.patched_funcs.enter_context(
+ mock.patch.object(util, f, null_func)
+ )
+
+ def patchOS(self, new_root):
+ patch_funcs = {
+ os.path: [
+ ("isfile", 1),
+ ("exists", 1),
+ ("islink", 1),
+ ("isdir", 1),
+ ("lexists", 1),
+ ],
+ os: [
+ ("listdir", 1),
+ ("mkdir", 1),
+ ("lstat", 1),
+ ("symlink", 2),
+ ("stat", 1),
+ ],
+ }
+
+ if hasattr(os, "scandir"):
+ # py27 does not have scandir
+ patch_funcs[os].append(("scandir", 1))
+
+ for (mod, funcs) in patch_funcs.items():
+ for f, nargs in funcs:
+ func = getattr(mod, f)
+ trap_func = retarget_many_wrapper(new_root, nargs, func)
+ self.patched_funcs.enter_context(
+ mock.patch.object(mod, f, trap_func)
+ )
+
+ def patchOpen(self, new_root):
+ trap_func = retarget_many_wrapper(new_root, 1, open)
+ self.patched_funcs.enter_context(
+ mock.patch("builtins.open", trap_func)
+ )
+
+ def patchStdoutAndStderr(self, stdout=None, stderr=None):
+ if stdout is not None:
+ self.patched_funcs.enter_context(
+ mock.patch.object(sys, "stdout", stdout)
+ )
+ if stderr is not None:
+ self.patched_funcs.enter_context(
+ mock.patch.object(sys, "stderr", stderr)
+ )
+
+ def reRoot(self, root=None):
+ if root is None:
+ root = self.tmp_dir()
+ self.patchUtils(root)
+ self.patchOS(root)
+ self.patchOpen(root)
+ return root
+
+ @contextmanager
+ def reRooted(self, root=None):
+ try:
+ yield self.reRoot(root)
+ finally:
+ self.patched_funcs.close()
+
+
+class HttprettyTestCase(CiTestCase):
+ # necessary as http_proxy gets in the way of httpretty
+ # https://github.com/gabrielfalcao/HTTPretty/issues/122
+ # Also make sure that allow_net_connect is set to False.
+ # And make sure reset and enable/disable are done.
+
+ def setUp(self):
+ self.restore_proxy = os.environ.get("http_proxy")
+ if self.restore_proxy is not None:
+ del os.environ["http_proxy"]
+ super(HttprettyTestCase, self).setUp()
+ httpretty.HTTPretty.allow_net_connect = False
+ httpretty.reset()
+ httpretty.enable()
+ # Stop the logging from HttpPretty so our logs don't get mixed
+ # up with its logs
+ logging.getLogger("httpretty.core").setLevel(logging.CRITICAL)
+
+ def tearDown(self):
+ httpretty.disable()
+ httpretty.reset()
+ if self.restore_proxy:
+ os.environ["http_proxy"] = self.restore_proxy
+ super(HttprettyTestCase, self).tearDown()
+
+
+class SchemaTestCaseMixin(unittest.TestCase):
+ def assertSchemaValid(self, cfg, msg="Valid Schema failed validation."):
+ """Assert the config is valid per self.schema.
+
+ If there is only one top level key in the schema properties, then
+ the cfg will be put under that key."""
+ props = list(self.schema.get("properties"))
+ # put cfg under top level key if there is only one in the schema
+ if len(props) == 1:
+ cfg = {props[0]: cfg}
+ try:
+ validate_cloudconfig_schema(cfg, self.schema, strict=True)
+ except SchemaValidationError:
+ self.fail(msg)
+
+
+def populate_dir(path, files):
+ if not os.path.exists(path):
+ os.makedirs(path)
+ ret = []
+ for (name, content) in files.items():
+ p = os.path.sep.join([path, name])
+ util.ensure_dir(os.path.dirname(p))
+ with open(p, "wb") as fp:
+ if isinstance(content, bytes):
+ fp.write(content)
+ else:
+ fp.write(content.encode("utf-8"))
+ fp.close()
+ ret.append(p)
+
+ return ret
+
+
+def populate_dir_with_ts(path, data):
+ """data is {'file': ('contents', mtime)}. mtime relative to now."""
+ populate_dir(path, dict((k, v[0]) for k, v in data.items()))
+ btime = time.time()
+ for fpath, (_contents, mtime) in data.items():
+ ts = btime + mtime if mtime else btime
+ os.utime(os.path.sep.join((path, fpath)), (ts, ts))
+
+
+def dir2dict(startdir, prefix=None):
+ flist = {}
+ if prefix is None:
+ prefix = startdir
+ for root, _dirs, files in os.walk(startdir):
+ for fname in files:
+ fpath = os.path.join(root, fname)
+ key = fpath[len(prefix) :]
+ flist[key] = util.load_file(fpath)
+ return flist
+
+
+def wrap_and_call(prefix, mocks, func, *args, **kwargs):
+ """
+    Call func(*args, **kwargs) with the mocks applied, then unapply the mocks.
+    Nicer to read than repeating decorators on each function.
+
+ prefix: prefix for mock names (e.g. 'cloudinit.stages.util') or None
+ mocks: dictionary of names (under 'prefix') to mock and either
+ a return value or a dictionary to pass to the mock.patch call
+ func: function to call with mocks applied
+ *args,**kwargs: arguments for 'func'
+
+ return_value: return from 'func'
+ """
+ delim = "."
+ if prefix is None:
+ prefix = ""
+ prefix = prefix.rstrip(delim)
+ unwraps = []
+ for fname, kw in mocks.items():
+ if prefix:
+ fname = delim.join((prefix, fname))
+ if not isinstance(kw, dict):
+ kw = {"return_value": kw}
+ p = mock.patch(fname, **kw)
+ p.start()
+ unwraps.append(p)
+ try:
+ return func(*args, **kwargs)
+ finally:
+ for p in unwraps:
+ p.stop()
+
+
+def resourceLocation(subname=None):
+ path = cloud_init_project_dir("tests/data")
+ if not subname:
+ return path
+ return os.path.join(path, subname)
+
+
+def readResource(name, mode="r"):
+ with open(resourceLocation(name), mode) as fh:
+ return fh.read()
+
+
+try:
+ import jsonschema
+
+ assert jsonschema # avoid pyflakes error F401: import unused
+ _missing_jsonschema_dep = False
+except ImportError:
+ _missing_jsonschema_dep = True
+
+
+def skipUnlessJsonSchema():
+ return skipIf(
+ _missing_jsonschema_dep, "No python-jsonschema dependency present."
+ )
+
+
+def skipUnlessJinja():
+ return skipIf(not JINJA_AVAILABLE, "No jinja dependency present.")
+
+
+def skipIfJinja():
+ return skipIf(JINJA_AVAILABLE, "Jinja dependency present.")
+
+
+# older versions of mock do not have the useful 'assert_not_called'
+if not hasattr(mock.Mock, "assert_not_called"):
+
+ def __mock_assert_not_called(mmock):
+ if mmock.call_count != 0:
+ msg = (
+ "[citest] Expected '%s' to not have been called. "
+ "Called %s times."
+ % (mmock._mock_name or "mock", mmock.call_count)
+ )
+ raise AssertionError(msg)
+
+ mock.Mock.assert_not_called = __mock_assert_not_called
+
+
+def get_top_level_dir() -> Path:
+ """Return the absolute path to the top cloudinit project directory
+
+ @return Path('<top-cloudinit-dir>')
+ """
+ return Path(cloudinit.__file__).parent.parent.resolve()
+
+
+def cloud_init_project_dir(sub_path: str) -> str:
+ """Get a path within the cloudinit project directory
+
+ @return str of the combined path
+
+ Example: cloud_init_project_dir("my/path") -> "/path/to/cloud-init/my/path"
+ """
+ return str(get_top_level_dir() / sub_path)
+
+
+# vi: ts=4 expandtab
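
`wrap_and_call` above exists purely for readability: instead of stacking `@mock.patch` decorators, a test passes a name prefix, a mapping of attribute names to either a bare return value or full `mock.patch` keyword arguments, and the callable to run while everything is patched. A small sketch of the calling convention (the patched targets here are just illustrative):

    import os

    from tests.unittests.helpers import wrap_and_call  # the module added above


    def cwd_and_exists():
        return os.getcwd(), os.path.exists("/nonexistent")


    result = wrap_and_call(
        "os",                                       # prefix for every mocked name
        {
            "getcwd": "/fake/root",                 # bare value -> return_value
            "path.exists": {"return_value": True},  # dict -> mock.patch kwargs
        },
        cwd_and_exists,
    )
    assert result == ("/fake/root", True)
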
diff --git a/tests/unittests/test_filters/__init__.py b/tests/unittests/net/__init__.py
index e69de29b..e69de29b 100644
--- a/tests/unittests/test_filters/__init__.py
+++ b/tests/unittests/net/__init__.py
diff --git a/tests/unittests/net/test_dhcp.py b/tests/unittests/net/test_dhcp.py
new file mode 100644
index 00000000..876873d5
--- /dev/null
+++ b/tests/unittests/net/test_dhcp.py
@@ -0,0 +1,797 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import os
+import signal
+from textwrap import dedent
+
+import httpretty
+
+import cloudinit.net as net
+from cloudinit.net.dhcp import (
+ InvalidDHCPLeaseFileError,
+ dhcp_discovery,
+ maybe_perform_dhcp_discovery,
+ networkd_load_leases,
+ parse_dhcp_lease_file,
+ parse_static_routes,
+)
+from cloudinit.util import ensure_file, write_file
+from tests.unittests.helpers import (
+ CiTestCase,
+ HttprettyTestCase,
+ mock,
+ populate_dir,
+ wrap_and_call,
+)
+
+
+class TestParseDHCPLeasesFile(CiTestCase):
+ def test_parse_empty_lease_file_errors(self):
+ """parse_dhcp_lease_file errors when file content is empty."""
+ empty_file = self.tmp_path("leases")
+ ensure_file(empty_file)
+ with self.assertRaises(InvalidDHCPLeaseFileError) as context_manager:
+ parse_dhcp_lease_file(empty_file)
+ error = context_manager.exception
+ self.assertIn("Cannot parse empty dhcp lease file", str(error))
+
+ def test_parse_malformed_lease_file_content_errors(self):
+ """parse_dhcp_lease_file errors when file content isn't dhcp leases."""
+ non_lease_file = self.tmp_path("leases")
+ write_file(non_lease_file, "hi mom.")
+ with self.assertRaises(InvalidDHCPLeaseFileError) as context_manager:
+ parse_dhcp_lease_file(non_lease_file)
+ error = context_manager.exception
+ self.assertIn("Cannot parse dhcp lease file", str(error))
+
+ def test_parse_multiple_leases(self):
+ """parse_dhcp_lease_file returns a list of all leases within."""
+ lease_file = self.tmp_path("leases")
+ content = dedent(
+ """
+ lease {
+ interface "wlp3s0";
+ fixed-address 192.168.2.74;
+ filename "http://192.168.2.50/boot.php?mac=${netX}";
+ option subnet-mask 255.255.255.0;
+ option routers 192.168.2.1;
+ renew 4 2017/07/27 18:02:30;
+ expire 5 2017/07/28 07:08:15;
+ }
+ lease {
+ interface "wlp3s0";
+ fixed-address 192.168.2.74;
+ filename "http://192.168.2.50/boot.php?mac=${netX}";
+ option subnet-mask 255.255.255.0;
+ option routers 192.168.2.1;
+ }
+ """
+ )
+ expected = [
+ {
+ "interface": "wlp3s0",
+ "fixed-address": "192.168.2.74",
+ "subnet-mask": "255.255.255.0",
+ "routers": "192.168.2.1",
+ "renew": "4 2017/07/27 18:02:30",
+ "expire": "5 2017/07/28 07:08:15",
+ "filename": "http://192.168.2.50/boot.php?mac=${netX}",
+ },
+ {
+ "interface": "wlp3s0",
+ "fixed-address": "192.168.2.74",
+ "filename": "http://192.168.2.50/boot.php?mac=${netX}",
+ "subnet-mask": "255.255.255.0",
+ "routers": "192.168.2.1",
+ },
+ ]
+ write_file(lease_file, content)
+ self.assertCountEqual(expected, parse_dhcp_lease_file(lease_file))
+
+
+class TestDHCPRFC3442(CiTestCase):
+ def test_parse_lease_finds_rfc3442_classless_static_routes(self):
+ """parse_dhcp_lease_file returns rfc3442-classless-static-routes."""
+ lease_file = self.tmp_path("leases")
+ content = dedent(
+ """
+ lease {
+ interface "wlp3s0";
+ fixed-address 192.168.2.74;
+ option subnet-mask 255.255.255.0;
+ option routers 192.168.2.1;
+ option rfc3442-classless-static-routes 0,130,56,240,1;
+ renew 4 2017/07/27 18:02:30;
+ expire 5 2017/07/28 07:08:15;
+ }
+ """
+ )
+ expected = [
+ {
+ "interface": "wlp3s0",
+ "fixed-address": "192.168.2.74",
+ "subnet-mask": "255.255.255.0",
+ "routers": "192.168.2.1",
+ "rfc3442-classless-static-routes": "0,130,56,240,1",
+ "renew": "4 2017/07/27 18:02:30",
+ "expire": "5 2017/07/28 07:08:15",
+ }
+ ]
+ write_file(lease_file, content)
+ self.assertCountEqual(expected, parse_dhcp_lease_file(lease_file))
+
+ def test_parse_lease_finds_classless_static_routes(self):
+ """
+ parse_dhcp_lease_file returns classless-static-routes
+ for the CentOS lease format.
+ """
+ lease_file = self.tmp_path("leases")
+ content = dedent(
+ """
+ lease {
+ interface "wlp3s0";
+ fixed-address 192.168.2.74;
+ option subnet-mask 255.255.255.0;
+ option routers 192.168.2.1;
+ option classless-static-routes 0 130.56.240.1;
+ renew 4 2017/07/27 18:02:30;
+ expire 5 2017/07/28 07:08:15;
+ }
+ """
+ )
+ expected = [
+ {
+ "interface": "wlp3s0",
+ "fixed-address": "192.168.2.74",
+ "subnet-mask": "255.255.255.0",
+ "routers": "192.168.2.1",
+ "classless-static-routes": "0 130.56.240.1",
+ "renew": "4 2017/07/27 18:02:30",
+ "expire": "5 2017/07/28 07:08:15",
+ }
+ ]
+ write_file(lease_file, content)
+ self.assertCountEqual(expected, parse_dhcp_lease_file(lease_file))
+
+ @mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network")
+ @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ def test_obtain_lease_parses_static_routes(self, m_maybe, m_ipv4):
+ """EphemeralDHPCv4 parses rfc3442 routes for EphemeralIPv4Network"""
+ lease = [
+ {
+ "interface": "wlp3s0",
+ "fixed-address": "192.168.2.74",
+ "subnet-mask": "255.255.255.0",
+ "routers": "192.168.2.1",
+ "rfc3442-classless-static-routes": "0,130,56,240,1",
+ "renew": "4 2017/07/27 18:02:30",
+ "expire": "5 2017/07/28 07:08:15",
+ }
+ ]
+ m_maybe.return_value = lease
+ eph = net.dhcp.EphemeralDHCPv4()
+ eph.obtain_lease()
+ expected_kwargs = {
+ "interface": "wlp3s0",
+ "ip": "192.168.2.74",
+ "prefix_or_mask": "255.255.255.0",
+ "broadcast": "192.168.2.255",
+ "static_routes": [("0.0.0.0/0", "130.56.240.1")],
+ "router": "192.168.2.1",
+ }
+ m_ipv4.assert_called_with(**expected_kwargs)
+
+ @mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network")
+ @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ def test_obtain_centos_lease_parses_static_routes(self, m_maybe, m_ipv4):
+ """
+ EphemeralDHCPv4 parses rfc3442 routes for EphemeralIPv4Network
+ for the CentOS lease format
+ """
+ lease = [
+ {
+ "interface": "wlp3s0",
+ "fixed-address": "192.168.2.74",
+ "subnet-mask": "255.255.255.0",
+ "routers": "192.168.2.1",
+ "classless-static-routes": "0 130.56.240.1",
+ "renew": "4 2017/07/27 18:02:30",
+ "expire": "5 2017/07/28 07:08:15",
+ }
+ ]
+ m_maybe.return_value = lease
+ eph = net.dhcp.EphemeralDHCPv4()
+ eph.obtain_lease()
+ expected_kwargs = {
+ "interface": "wlp3s0",
+ "ip": "192.168.2.74",
+ "prefix_or_mask": "255.255.255.0",
+ "broadcast": "192.168.2.255",
+ "static_routes": [("0.0.0.0/0", "130.56.240.1")],
+ "router": "192.168.2.1",
+ }
+ m_ipv4.assert_called_with(**expected_kwargs)
+
+
+class TestDHCPParseStaticRoutes(CiTestCase):
+
+ with_logs = True
+
+ def test_parse_static_routes_empty_string(self):
+ self.assertEqual([], parse_static_routes(""))
+
+ def test_parse_static_routes_invalid_input_returns_empty_list(self):
+ rfc3442 = "32,169,254,169,254,130,56,248"
+ self.assertEqual([], parse_static_routes(rfc3442))
+
+ def test_parse_static_routes_bogus_width_returns_empty_list(self):
+ rfc3442 = "33,169,254,169,254,130,56,248"
+ self.assertEqual([], parse_static_routes(rfc3442))
+
+ def test_parse_static_routes_single_ip(self):
+ rfc3442 = "32,169,254,169,254,130,56,248,255"
+ self.assertEqual(
+ [("169.254.169.254/32", "130.56.248.255")],
+ parse_static_routes(rfc3442),
+ )
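+
+ # For reference: RFC3442 encodes each route as
+ #   <prefix-length>,<significant destination octets...>,<gateway octets>
+ # so "32,169,254,169,254,130,56,248,255" is 169.254.169.254/32 via
+ # 130.56.248.255, and a leading "0" (no destination octets) is a default
+ # route.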
+
+ def test_parse_static_routes_single_ip_handles_trailing_semicolon(self):
+ rfc3442 = "32,169,254,169,254,130,56,248,255;"
+ self.assertEqual(
+ [("169.254.169.254/32", "130.56.248.255")],
+ parse_static_routes(rfc3442),
+ )
+
+ def test_parse_static_routes_default_route(self):
+ rfc3442 = "0,130,56,240,1"
+ self.assertEqual(
+ [("0.0.0.0/0", "130.56.240.1")], parse_static_routes(rfc3442)
+ )
+
+ def test_unspecified_gateway(self):
+ rfc3442 = "32,169,254,169,254,0,0,0,0"
+ self.assertEqual(
+ [("169.254.169.254/32", "0.0.0.0")], parse_static_routes(rfc3442)
+ )
+
+ def test_parse_static_routes_class_c_b_a(self):
+ class_c = "24,192,168,74,192,168,0,4"
+ class_b = "16,172,16,172,16,0,4"
+ class_a = "8,10,10,0,0,4"
+ rfc3442 = ",".join([class_c, class_b, class_a])
+ self.assertEqual(
+ sorted(
+ [
+ ("192.168.74.0/24", "192.168.0.4"),
+ ("172.16.0.0/16", "172.16.0.4"),
+ ("10.0.0.0/8", "10.0.0.4"),
+ ]
+ ),
+ sorted(parse_static_routes(rfc3442)),
+ )
+
+ def test_parse_static_routes_logs_error_truncated(self):
+ bad_rfc3442 = {
+ "class_c": "24,169,254,169,10",
+ "class_b": "16,172,16,10",
+ "class_a": "8,10,10",
+ "gateway": "0,0",
+ "netlen": "33,0",
+ }
+ for rfc3442 in bad_rfc3442.values():
+ self.assertEqual([], parse_static_routes(rfc3442))
+
+ logs = self.logs.getvalue()
+ self.assertEqual(len(bad_rfc3442.keys()), len(logs.splitlines()))
+
+ def test_parse_static_routes_returns_valid_routes_until_parse_err(self):
+ class_c = "24,192,168,74,192,168,0,4"
+ class_b = "16,172,16,172,16,0,4"
+ class_a_error = "8,10,10,0,0"
+ rfc3442 = ",".join([class_c, class_b, class_a_error])
+ self.assertEqual(
+ sorted(
+ [
+ ("192.168.74.0/24", "192.168.0.4"),
+ ("172.16.0.0/16", "172.16.0.4"),
+ ]
+ ),
+ sorted(parse_static_routes(rfc3442)),
+ )
+
+ logs = self.logs.getvalue()
+ self.assertIn(rfc3442, logs.splitlines()[0])
+
+ def test_redhat_format(self):
+ redhat_format = "24.191.168.128 192.168.128.1,0 192.168.128.1"
+ self.assertEqual(
+ sorted(
+ [
+ ("191.168.128.0/24", "192.168.128.1"),
+ ("0.0.0.0/0", "192.168.128.1"),
+ ]
+ ),
+ sorted(parse_static_routes(redhat_format)),
+ )
+
+ def test_redhat_format_with_a_space_too_much_after_comma(self):
+ redhat_format = "24.191.168.128 192.168.128.1, 0 192.168.128.1"
+ self.assertEqual(
+ sorted(
+ [
+ ("191.168.128.0/24", "192.168.128.1"),
+ ("0.0.0.0/0", "192.168.128.1"),
+ ]
+ ),
+ sorted(parse_static_routes(redhat_format)),
+ )
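+
+ # For reference: the RedHat/CentOS dhclient format above is a comma-separated
+ # list of "<prefixlen>.<network octets> <gateway>" pairs, so
+ # "24.191.168.128 192.168.128.1,0 192.168.128.1" yields 191.168.128.0/24 and
+ # a default route, both via 192.168.128.1.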
+
+
+class TestDHCPDiscoveryClean(CiTestCase):
+ with_logs = True
+
+ @mock.patch("cloudinit.net.dhcp.find_fallback_nic")
+ def test_no_fallback_nic_found(self, m_fallback_nic):
+ """Log and do nothing when nic is absent and no fallback is found."""
+ m_fallback_nic.return_value = None # No fallback nic found
+ self.assertEqual([], maybe_perform_dhcp_discovery())
+ self.assertIn(
+ "Skip dhcp_discovery: Unable to find fallback nic.",
+ self.logs.getvalue(),
+ )
+
+ def test_provided_nic_does_not_exist(self):
+ """When the provided nic doesn't exist, log a message and no-op."""
+ self.assertEqual([], maybe_perform_dhcp_discovery("idontexist"))
+ self.assertIn(
+ "Skip dhcp_discovery: nic idontexist not found in get_devicelist.",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.net.dhcp.subp.which")
+ @mock.patch("cloudinit.net.dhcp.find_fallback_nic")
+ def test_absent_dhclient_command(self, m_fallback, m_which):
+ """When dhclient doesn't exist in the OS, log the issue and no-op."""
+ m_fallback.return_value = "eth9"
+ m_which.return_value = None # dhclient isn't found
+ self.assertEqual([], maybe_perform_dhcp_discovery())
+ self.assertIn(
+ "Skip dhclient configuration: No dhclient command found.",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.temp_utils.os.getuid")
+ @mock.patch("cloudinit.net.dhcp.dhcp_discovery")
+ @mock.patch("cloudinit.net.dhcp.subp.which")
+ @mock.patch("cloudinit.net.dhcp.find_fallback_nic")
+ def test_dhclient_run_with_tmpdir(self, m_fback, m_which, m_dhcp, m_uid):
+ """maybe_perform_dhcp_discovery passes tmpdir to dhcp_discovery."""
+ m_uid.return_value = 0 # Fake root user for tmpdir
+ m_fback.return_value = "eth9"
+ m_which.return_value = "/sbin/dhclient"
+ m_dhcp.return_value = {"address": "192.168.2.2"}
+ retval = wrap_and_call(
+ "cloudinit.temp_utils",
+ {"_TMPDIR": {"new": None}, "os.getuid": 0},
+ maybe_perform_dhcp_discovery,
+ )
+ self.assertEqual({"address": "192.168.2.2"}, retval)
+ self.assertEqual(
+ 1, m_dhcp.call_count, "dhcp_discovery not called once"
+ )
+ call = m_dhcp.call_args_list[0]
+ self.assertEqual("/sbin/dhclient", call[0][0])
+ self.assertEqual("eth9", call[0][1])
+ self.assertIn("/var/tmp/cloud-init/cloud-init-dhcp-", call[0][2])
+
+ @mock.patch("time.sleep", mock.MagicMock())
+ @mock.patch("cloudinit.net.dhcp.os.kill")
+ @mock.patch("cloudinit.net.dhcp.subp.subp")
+ def test_dhcp_discovery_run_in_sandbox_warns_invalid_pid(
+ self, m_subp, m_kill
+ ):
+ """dhcp_discovery logs a warning when pidfile contains invalid content.
+
+ Lease processing still occurs and no proc kill is attempted.
+ """
+ m_subp.return_value = ("", "")
+ tmpdir = self.tmp_dir()
+ dhclient_script = os.path.join(tmpdir, "dhclient.orig")
+ script_content = "#!/bin/bash\necho fake-dhclient"
+ write_file(dhclient_script, script_content, mode=0o755)
+ write_file(self.tmp_path("dhclient.pid", tmpdir), "") # Empty pid ''
+ lease_content = dedent(
+ """
+ lease {
+ interface "eth9";
+ fixed-address 192.168.2.74;
+ option subnet-mask 255.255.255.0;
+ option routers 192.168.2.1;
+ }
+ """
+ )
+ write_file(self.tmp_path("dhcp.leases", tmpdir), lease_content)
+
+ self.assertCountEqual(
+ [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.74",
+ "subnet-mask": "255.255.255.0",
+ "routers": "192.168.2.1",
+ }
+ ],
+ dhcp_discovery(dhclient_script, "eth9", tmpdir),
+ )
+ self.assertIn(
+ "dhclient(pid=, parentpid=unknown) failed "
+ "to daemonize after 10.0 seconds",
+ self.logs.getvalue(),
+ )
+ m_kill.assert_not_called()
+
+ @mock.patch("cloudinit.net.dhcp.util.get_proc_ppid")
+ @mock.patch("cloudinit.net.dhcp.os.kill")
+ @mock.patch("cloudinit.net.dhcp.util.wait_for_files")
+ @mock.patch("cloudinit.net.dhcp.subp.subp")
+ def test_dhcp_discovery_run_in_sandbox_waits_on_lease_and_pid(
+ self, m_subp, m_wait, m_kill, m_getppid
+ ):
+ """dhcp_discovery waits for the presence of pidfile and dhcp.leases."""
+ m_subp.return_value = ("", "")
+ tmpdir = self.tmp_dir()
+ dhclient_script = os.path.join(tmpdir, "dhclient.orig")
+ script_content = "#!/bin/bash\necho fake-dhclient"
+ write_file(dhclient_script, script_content, mode=0o755)
+ # Don't create pid or leases file
+ pidfile = self.tmp_path("dhclient.pid", tmpdir)
+ leasefile = self.tmp_path("dhcp.leases", tmpdir)
+ m_wait.return_value = [pidfile]  # Report the pidfile as still missing
+ m_getppid.return_value = 1 # Indicate that dhclient has daemonized
+ self.assertEqual([], dhcp_discovery(dhclient_script, "eth9", tmpdir))
+ self.assertEqual(
+ mock.call([pidfile, leasefile], maxwait=5, naplen=0.01),
+ m_wait.call_args_list[0],
+ )
+ self.assertIn(
+ "WARNING: dhclient did not produce expected files: dhclient.pid",
+ self.logs.getvalue(),
+ )
+ m_kill.assert_not_called()
+
+ @mock.patch("cloudinit.net.dhcp.util.get_proc_ppid")
+ @mock.patch("cloudinit.net.dhcp.os.kill")
+ @mock.patch("cloudinit.net.dhcp.subp.subp")
+ def test_dhcp_discovery_run_in_sandbox(self, m_subp, m_kill, m_getppid):
+ """dhcp_discovery brings up the interface and runs dhclient.
+
+ It also returns the parsed dhcp.leases file generated in the sandbox.
+ """
+ m_subp.return_value = ("", "")
+ tmpdir = self.tmp_dir()
+ dhclient_script = os.path.join(tmpdir, "dhclient.orig")
+ script_content = "#!/bin/bash\necho fake-dhclient"
+ write_file(dhclient_script, script_content, mode=0o755)
+ lease_content = dedent(
+ """
+ lease {
+ interface "eth9";
+ fixed-address 192.168.2.74;
+ option subnet-mask 255.255.255.0;
+ option routers 192.168.2.1;
+ }
+ """
+ )
+ lease_file = os.path.join(tmpdir, "dhcp.leases")
+ write_file(lease_file, lease_content)
+ pid_file = os.path.join(tmpdir, "dhclient.pid")
+ my_pid = 1
+ write_file(pid_file, "%d\n" % my_pid)
+ m_getppid.return_value = 1 # Indicate that dhclient has daemonized
+
+ self.assertCountEqual(
+ [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.74",
+ "subnet-mask": "255.255.255.0",
+ "routers": "192.168.2.1",
+ }
+ ],
+ dhcp_discovery(dhclient_script, "eth9", tmpdir),
+ )
+ # dhclient script got copied
+ with open(os.path.join(tmpdir, "dhclient")) as stream:
+ self.assertEqual(script_content, stream.read())
+ # Interface was brought up before dhclient was called from the sandbox
+ m_subp.assert_has_calls(
+ [
+ mock.call(
+ ["ip", "link", "set", "dev", "eth9", "up"], capture=True
+ ),
+ mock.call(
+ [
+ os.path.join(tmpdir, "dhclient"),
+ "-1",
+ "-v",
+ "-lf",
+ lease_file,
+ "-pf",
+ os.path.join(tmpdir, "dhclient.pid"),
+ "eth9",
+ "-sf",
+ "/bin/true",
+ ],
+ capture=True,
+ ),
+ ]
+ )
+ m_kill.assert_has_calls([mock.call(my_pid, signal.SIGKILL)])
+
+ @mock.patch("cloudinit.net.dhcp.util.get_proc_ppid")
+ @mock.patch("cloudinit.net.dhcp.os.kill")
+ @mock.patch("cloudinit.net.dhcp.subp.subp")
+ def test_dhcp_discovery_outside_sandbox(self, m_subp, m_kill, m_getppid):
+ """dhcp_discovery brings up the interface and runs dhclient.
+
+ It also returns the parsed dhcp.leases file generated in the sandbox.
+ """
+ m_subp.return_value = ("", "")
+ tmpdir = self.tmp_dir()
+ dhclient_script = os.path.join(tmpdir, "dhclient.orig")
+ script_content = "#!/bin/bash\necho fake-dhclient"
+ write_file(dhclient_script, script_content, mode=0o755)
+ lease_content = dedent(
+ """
+ lease {
+ interface "eth9";
+ fixed-address 192.168.2.74;
+ option subnet-mask 255.255.255.0;
+ option routers 192.168.2.1;
+ }
+ """
+ )
+ lease_file = os.path.join(tmpdir, "dhcp.leases")
+ write_file(lease_file, lease_content)
+ pid_file = os.path.join(tmpdir, "dhclient.pid")
+ my_pid = 1
+ write_file(pid_file, "%d\n" % my_pid)
+ m_getppid.return_value = 1 # Indicate that dhclient has daemonized
+
+ with mock.patch("os.access", return_value=False):
+ self.assertCountEqual(
+ [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.74",
+ "subnet-mask": "255.255.255.0",
+ "routers": "192.168.2.1",
+ }
+ ],
+ dhcp_discovery(dhclient_script, "eth9", tmpdir),
+ )
+ # original dhclient script is left in place unchanged
+ with open(os.path.join(tmpdir, "dhclient.orig")) as stream:
+ self.assertEqual(script_content, stream.read())
+ # Interface was brought up before dhclient was called from its original path
+ m_subp.assert_has_calls(
+ [
+ mock.call(
+ ["ip", "link", "set", "dev", "eth9", "up"], capture=True
+ ),
+ mock.call(
+ [
+ os.path.join(tmpdir, "dhclient.orig"),
+ "-1",
+ "-v",
+ "-lf",
+ lease_file,
+ "-pf",
+ os.path.join(tmpdir, "dhclient.pid"),
+ "eth9",
+ "-sf",
+ "/bin/true",
+ ],
+ capture=True,
+ ),
+ ]
+ )
+ m_kill.assert_has_calls([mock.call(my_pid, signal.SIGKILL)])
+
+ @mock.patch("cloudinit.net.dhcp.util.get_proc_ppid")
+ @mock.patch("cloudinit.net.dhcp.os.kill")
+ @mock.patch("cloudinit.net.dhcp.subp.subp")
+ def test_dhcp_output_error_stream(self, m_subp, m_kill, m_getppid):
+ """ "dhcp_log_func is called with the output and error streams of
+ dhclinet when the callable is passed."""
+ dhclient_err = "FAKE DHCLIENT ERROR"
+ dhclient_out = "FAKE DHCLIENT OUT"
+ m_subp.return_value = (dhclient_out, dhclient_err)
+ tmpdir = self.tmp_dir()
+ dhclient_script = os.path.join(tmpdir, "dhclient.orig")
+ script_content = "#!/bin/bash\necho fake-dhclient"
+ write_file(dhclient_script, script_content, mode=0o755)
+ lease_content = dedent(
+ """
+ lease {
+ interface "eth9";
+ fixed-address 192.168.2.74;
+ option subnet-mask 255.255.255.0;
+ option routers 192.168.2.1;
+ }
+ """
+ )
+ lease_file = os.path.join(tmpdir, "dhcp.leases")
+ write_file(lease_file, lease_content)
+ pid_file = os.path.join(tmpdir, "dhclient.pid")
+ my_pid = 1
+ write_file(pid_file, "%d\n" % my_pid)
+ m_getppid.return_value = 1 # Indicate that dhclient has daemonized
+
+ def dhcp_log_func(out, err):
+ self.assertEqual(out, dhclient_out)
+ self.assertEqual(err, dhclient_err)
+
+ dhcp_discovery(
+ dhclient_script, "eth9", tmpdir, dhcp_log_func=dhcp_log_func
+ )
+
+
+class TestSystemdParseLeases(CiTestCase):
+
+ lxd_lease = dedent(
+ """\
+ # This is private data. Do not parse.
+ ADDRESS=10.75.205.242
+ NETMASK=255.255.255.0
+ ROUTER=10.75.205.1
+ SERVER_ADDRESS=10.75.205.1
+ NEXT_SERVER=10.75.205.1
+ BROADCAST=10.75.205.255
+ T1=1580
+ T2=2930
+ LIFETIME=3600
+ DNS=10.75.205.1
+ DOMAINNAME=lxd
+ HOSTNAME=a1
+ CLIENTID=ffe617693400020000ab110c65a6a0866931c2
+ """
+ )
+
+ lxd_parsed = {
+ "ADDRESS": "10.75.205.242",
+ "NETMASK": "255.255.255.0",
+ "ROUTER": "10.75.205.1",
+ "SERVER_ADDRESS": "10.75.205.1",
+ "NEXT_SERVER": "10.75.205.1",
+ "BROADCAST": "10.75.205.255",
+ "T1": "1580",
+ "T2": "2930",
+ "LIFETIME": "3600",
+ "DNS": "10.75.205.1",
+ "DOMAINNAME": "lxd",
+ "HOSTNAME": "a1",
+ "CLIENTID": "ffe617693400020000ab110c65a6a0866931c2",
+ }
+
+ azure_lease = dedent(
+ """\
+ # This is private data. Do not parse.
+ ADDRESS=10.132.0.5
+ NETMASK=255.255.255.255
+ ROUTER=10.132.0.1
+ SERVER_ADDRESS=169.254.169.254
+ NEXT_SERVER=10.132.0.1
+ MTU=1460
+ T1=43200
+ T2=75600
+ LIFETIME=86400
+ DNS=169.254.169.254
+ NTP=169.254.169.254
+ DOMAINNAME=c.ubuntu-foundations.internal
+ DOMAIN_SEARCH_LIST=c.ubuntu-foundations.internal google.internal
+ HOSTNAME=tribaal-test-171002-1349.c.ubuntu-foundations.internal
+ ROUTES=10.132.0.1/32,0.0.0.0 0.0.0.0/0,10.132.0.1
+ CLIENTID=ff405663a200020000ab11332859494d7a8b4c
+ OPTION_245=624c3620
+ """
+ )
+
+ azure_parsed = {
+ "ADDRESS": "10.132.0.5",
+ "NETMASK": "255.255.255.255",
+ "ROUTER": "10.132.0.1",
+ "SERVER_ADDRESS": "169.254.169.254",
+ "NEXT_SERVER": "10.132.0.1",
+ "MTU": "1460",
+ "T1": "43200",
+ "T2": "75600",
+ "LIFETIME": "86400",
+ "DNS": "169.254.169.254",
+ "NTP": "169.254.169.254",
+ "DOMAINNAME": "c.ubuntu-foundations.internal",
+ "DOMAIN_SEARCH_LIST": "c.ubuntu-foundations.internal google.internal",
+ "HOSTNAME": "tribaal-test-171002-1349.c.ubuntu-foundations.internal",
+ "ROUTES": "10.132.0.1/32,0.0.0.0 0.0.0.0/0,10.132.0.1",
+ "CLIENTID": "ff405663a200020000ab11332859494d7a8b4c",
+ "OPTION_245": "624c3620",
+ }
+
+ def setUp(self):
+ super(TestSystemdParseLeases, self).setUp()
+ self.lease_d = self.tmp_dir()
+
+ def test_no_leases_returns_empty_dict(self):
+ """A leases dir with no lease files should return empty dictionary."""
+ self.assertEqual({}, networkd_load_leases(self.lease_d))
+
+ def test_no_leases_dir_returns_empty_dict(self):
+ """A non-existing leases dir should return empty dict."""
+ enodir = os.path.join(self.lease_d, "does-not-exist")
+ self.assertEqual({}, networkd_load_leases(enodir))
+
+ def test_single_leases_file(self):
+ """A leases dir with one leases file."""
+ populate_dir(self.lease_d, {"2": self.lxd_lease})
+ self.assertEqual(
+ {"2": self.lxd_parsed}, networkd_load_leases(self.lease_d)
+ )
+
+ def test_single_azure_leases_file(self):
+ """On Azure, option 245 should be present, verify it specifically."""
+ populate_dir(self.lease_d, {"1": self.azure_lease})
+ self.assertEqual(
+ {"1": self.azure_parsed}, networkd_load_leases(self.lease_d)
+ )
+
+ def test_multiple_files(self):
+ """Multiple leases files on azure with one found return that value."""
+ self.maxDiff = None
+ populate_dir(
+ self.lease_d, {"1": self.azure_lease, "9": self.lxd_lease}
+ )
+ self.assertEqual(
+ {"1": self.azure_parsed, "9": self.lxd_parsed},
+ networkd_load_leases(self.lease_d),
+ )
+
+
+class TestEphemeralDhcpNoNetworkSetup(HttprettyTestCase):
+ @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ def test_ephemeral_dhcp_no_network_if_url_connectivity(self, m_dhcp):
+ """No EphemeralDhcp4 network setup when connectivity_url succeeds."""
+ url = "http://example.org/index.html"
+
+ httpretty.register_uri(httpretty.GET, url)
+ with net.dhcp.EphemeralDHCPv4(
+ connectivity_url_data={"url": url},
+ ) as lease:
+ self.assertIsNone(lease)
+ # Ensure that no teardown happens:
+ m_dhcp.assert_not_called()
+
+ @mock.patch("cloudinit.net.dhcp.subp.subp")
+ @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ def test_ephemeral_dhcp_setup_network_if_url_connectivity(
+ self, m_dhcp, m_subp
+ ):
+ """No EphemeralDhcp4 network setup when connectivity_url succeeds."""
+ url = "http://example.org/index.html"
+ fake_lease = {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.2",
+ "subnet-mask": "255.255.0.0",
+ }
+ m_dhcp.return_value = [fake_lease]
+ m_subp.return_value = ("", "")
+
+ httpretty.register_uri(httpretty.GET, url, body={}, status=404)
+ with net.dhcp.EphemeralDHCPv4(
+ connectivity_url_data={"url": url},
+ ) as lease:
+ self.assertEqual(fake_lease, lease)
+ # Ensure that dhcp discovery occurs
+ m_dhcp.assert_called_once()
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/net/test_init.py b/tests/unittests/net/test_init.py
new file mode 100644
index 00000000..18b3fe59
--- /dev/null
+++ b/tests/unittests/net/test_init.py
@@ -0,0 +1,1734 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import copy
+import errno
+import ipaddress
+import os
+import textwrap
+from unittest import mock
+
+import httpretty
+import pytest
+import requests
+
+import cloudinit.net as net
+from cloudinit import safeyaml as yaml
+from cloudinit.subp import ProcessExecutionError
+from cloudinit.util import ensure_file, write_file
+from tests.unittests.helpers import CiTestCase, HttprettyTestCase
+
+
+class TestSysDevPath(CiTestCase):
+ def test_sys_dev_path(self):
+ """sys_dev_path returns a path under SYS_CLASS_NET for a device."""
+ dev = "something"
+ path = "attribute"
+ expected = net.SYS_CLASS_NET + dev + "/" + path
+ self.assertEqual(expected, net.sys_dev_path(dev, path))
+
+ def test_sys_dev_path_without_path(self):
+ """When path param isn't provided it defaults to empty string."""
+ dev = "something"
+ expected = net.SYS_CLASS_NET + dev + "/"
+ self.assertEqual(expected, net.sys_dev_path(dev))
+
+
+class TestReadSysNet(CiTestCase):
+ with_logs = True
+
+ def setUp(self):
+ super(TestReadSysNet, self).setUp()
+ sys_mock = mock.patch("cloudinit.net.get_sys_class_path")
+ self.m_sys_path = sys_mock.start()
+ self.sysdir = self.tmp_dir() + "/"
+ self.m_sys_path.return_value = self.sysdir
+ self.addCleanup(sys_mock.stop)
+
+ def test_read_sys_net_strips_contents_of_sys_path(self):
+ """read_sys_net strips whitespace from the contents of a sys file."""
+ content = "some stuff with trailing whitespace\t\r\n"
+ write_file(os.path.join(self.sysdir, "dev", "attr"), content)
+ self.assertEqual(content.strip(), net.read_sys_net("dev", "attr"))
+
+ def test_read_sys_net_reraises_oserror(self):
+ """read_sys_net raises OSError/IOError when file doesn't exist."""
+ # Use a broad Exception since older Pythons raise IOError where newer ones raise OSError.
+ with self.assertRaises(Exception) as context_manager: # noqa: H202
+ net.read_sys_net("dev", "attr")
+ error = context_manager.exception
+ self.assertIn("No such file or directory", str(error))
+
+ def test_read_sys_net_handles_error_with_on_enoent(self):
+ """read_sys_net handles OSError/IOError with on_enoent if provided."""
+ handled_errors = []
+
+ def on_enoent(e):
+ handled_errors.append(e)
+
+ net.read_sys_net("dev", "attr", on_enoent=on_enoent)
+ error = handled_errors[0]
+ self.assertIsInstance(error, Exception)
+ self.assertIn("No such file or directory", str(error))
+
+ def test_read_sys_net_translates_content(self):
+ """read_sys_net translates content when translate dict is provided."""
+ content = "you're welcome\n"
+ write_file(os.path.join(self.sysdir, "dev", "attr"), content)
+ translate = {"you're welcome": "de nada"}
+ self.assertEqual(
+ "de nada", net.read_sys_net("dev", "attr", translate=translate)
+ )
+
+ def test_read_sys_net_errors_on_translation_failures(self):
+ """read_sys_net raises a KeyError and logs details on failure."""
+ content = "you're welcome\n"
+ write_file(os.path.join(self.sysdir, "dev", "attr"), content)
+ with self.assertRaises(KeyError) as context_manager:
+ net.read_sys_net("dev", "attr", translate={})
+ error = context_manager.exception
+ self.assertEqual('"you\'re welcome"', str(error))
+ self.assertIn(
+ "Found unexpected (not translatable) value 'you're welcome' in "
+ "'{0}dev/attr".format(self.sysdir),
+ self.logs.getvalue(),
+ )
+
+ def test_read_sys_net_handles_keyerror_with_on_keyerror(self):
+ """read_sys_net handles translation errors calling on_keyerror."""
+ content = "you're welcome\n"
+ write_file(os.path.join(self.sysdir, "dev", "attr"), content)
+ handled_errors = []
+
+ def on_keyerror(e):
+ handled_errors.append(e)
+
+ net.read_sys_net("dev", "attr", translate={}, on_keyerror=on_keyerror)
+ error = handled_errors[0]
+ self.assertIsInstance(error, KeyError)
+ self.assertEqual('"you\'re welcome"', str(error))
+
+ def test_read_sys_net_safe_false_on_translate_failure(self):
+ """read_sys_net_safe returns False on translation failures."""
+ content = "you're welcome\n"
+ write_file(os.path.join(self.sysdir, "dev", "attr"), content)
+ self.assertFalse(net.read_sys_net_safe("dev", "attr", translate={}))
+
+ def test_read_sys_net_safe_returns_false_on_noent_failure(self):
+ """read_sys_net_safe returns False on file not found failures."""
+ self.assertFalse(net.read_sys_net_safe("dev", "attr"))
+
+ def test_read_sys_net_int_returns_none_on_error(self):
+ """read_sys_net_safe returns None on failures."""
+ self.assertFalse(net.read_sys_net_int("dev", "attr"))
+
+ def test_read_sys_net_int_returns_none_on_valueerror(self):
+ """read_sys_net_safe returns None when content is not an int."""
+ write_file(os.path.join(self.sysdir, "dev", "attr"), "NOTINT\n")
+ self.assertFalse(net.read_sys_net_int("dev", "attr"))
+
+ def test_read_sys_net_int_returns_integer_from_content(self):
+ """read_sys_net_safe returns None on failures."""
+ write_file(os.path.join(self.sysdir, "dev", "attr"), "1\n")
+ self.assertEqual(1, net.read_sys_net_int("dev", "attr"))
+
+ def test_is_up_true(self):
+ """is_up is True if sys/net/devname/operstate is 'up' or 'unknown'."""
+ for state in ["up", "unknown"]:
+ write_file(os.path.join(self.sysdir, "eth0", "operstate"), state)
+ self.assertTrue(net.is_up("eth0"))
+
+ def test_is_up_false(self):
+ """is_up is False if sys/net/devname/operstate is 'down' or invalid."""
+ for state in ["down", "incomprehensible"]:
+ write_file(os.path.join(self.sysdir, "eth0", "operstate"), state)
+ self.assertFalse(net.is_up("eth0"))
+
+ def test_is_bridge(self):
+ """is_bridge is True when /sys/net/devname/bridge exists."""
+ self.assertFalse(net.is_bridge("eth0"))
+ ensure_file(os.path.join(self.sysdir, "eth0", "bridge"))
+ self.assertTrue(net.is_bridge("eth0"))
+
+ def test_is_bond(self):
+ """is_bond is True when /sys/net/devname/bonding exists."""
+ self.assertFalse(net.is_bond("eth0"))
+ ensure_file(os.path.join(self.sysdir, "eth0", "bonding"))
+ self.assertTrue(net.is_bond("eth0"))
+
+ def test_get_master(self):
+ """get_master returns the path when /sys/net/devname/master exists."""
+ self.assertIsNone(net.get_master("enP1s1"))
+ master_path = os.path.join(self.sysdir, "enP1s1", "master")
+ ensure_file(master_path)
+ self.assertEqual(master_path, net.get_master("enP1s1"))
+
+ def test_master_is_bridge_or_bond(self):
+ bridge_mac = "aa:bb:cc:aa:bb:cc"
+ bond_mac = "cc:bb:aa:cc:bb:aa"
+
+ # No master => False
+ write_file(os.path.join(self.sysdir, "eth1", "address"), bridge_mac)
+ write_file(os.path.join(self.sysdir, "eth2", "address"), bond_mac)
+
+ self.assertFalse(net.master_is_bridge_or_bond("eth1"))
+ self.assertFalse(net.master_is_bridge_or_bond("eth2"))
+
+ # masters without bridge/bonding => False
+ write_file(os.path.join(self.sysdir, "br0", "address"), bridge_mac)
+ write_file(os.path.join(self.sysdir, "bond0", "address"), bond_mac)
+
+ os.symlink("../br0", os.path.join(self.sysdir, "eth1", "master"))
+ os.symlink("../bond0", os.path.join(self.sysdir, "eth2", "master"))
+
+ self.assertFalse(net.master_is_bridge_or_bond("eth1"))
+ self.assertFalse(net.master_is_bridge_or_bond("eth2"))
+
+ # masters with bridge/bonding => True
+ write_file(os.path.join(self.sysdir, "br0", "bridge"), "")
+ write_file(os.path.join(self.sysdir, "bond0", "bonding"), "")
+
+ self.assertTrue(net.master_is_bridge_or_bond("eth1"))
+ self.assertTrue(net.master_is_bridge_or_bond("eth2"))
+
+ def test_master_is_openvswitch(self):
+ ovs_mac = "bb:cc:aa:bb:cc:aa"
+
+ # No master => False
+ write_file(os.path.join(self.sysdir, "eth1", "address"), ovs_mac)
+
+ self.assertFalse(net.master_is_bridge_or_bond("eth1"))
+
+ # masters without ovs-system => False
+ write_file(os.path.join(self.sysdir, "ovs-system", "address"), ovs_mac)
+
+ os.symlink(
+ "../ovs-system", os.path.join(self.sysdir, "eth1", "master")
+ )
+
+ self.assertFalse(net.master_is_openvswitch("eth1"))
+
+ # masters with ovs-system => True
+ os.symlink(
+ "../ovs-system",
+ os.path.join(self.sysdir, "eth1", "upper_ovs-system"),
+ )
+
+ self.assertTrue(net.master_is_openvswitch("eth1"))
+
+ def test_is_vlan(self):
+ """is_vlan is True when /sys/net/devname/uevent has DEVTYPE=vlan."""
+ ensure_file(os.path.join(self.sysdir, "eth0", "uevent"))
+ self.assertFalse(net.is_vlan("eth0"))
+ content = "junk\nDEVTYPE=vlan\njunk\n"
+ write_file(os.path.join(self.sysdir, "eth0", "uevent"), content)
+ self.assertTrue(net.is_vlan("eth0"))
+
+
+class TestGenerateFallbackConfig(CiTestCase):
+ def setUp(self):
+ super(TestGenerateFallbackConfig, self).setUp()
+ sys_mock = mock.patch("cloudinit.net.get_sys_class_path")
+ self.m_sys_path = sys_mock.start()
+ self.sysdir = self.tmp_dir() + "/"
+ self.m_sys_path.return_value = self.sysdir
+ self.addCleanup(sys_mock.stop)
+ self.add_patch(
+ "cloudinit.net.util.is_container",
+ "m_is_container",
+ return_value=False,
+ )
+ self.add_patch("cloudinit.net.util.udevadm_settle", "m_settle")
+ self.add_patch(
+ "cloudinit.net.is_netfailover", "m_netfail", return_value=False
+ )
+ self.add_patch(
+ "cloudinit.net.is_netfail_master",
+ "m_netfail_master",
+ return_value=False,
+ )
+
+ def test_generate_fallback_finds_connected_eth_with_mac(self):
+ """generate_fallback_config finds any connected device with a mac."""
+ write_file(os.path.join(self.sysdir, "eth0", "carrier"), "1")
+ write_file(os.path.join(self.sysdir, "eth1", "carrier"), "1")
+ mac = "aa:bb:cc:aa:bb:cc"
+ write_file(os.path.join(self.sysdir, "eth1", "address"), mac)
+ expected = {
+ "ethernets": {
+ "eth1": {
+ "match": {"macaddress": mac},
+ "dhcp4": True,
+ "set-name": "eth1",
+ }
+ },
+ "version": 2,
+ }
+ self.assertEqual(expected, net.generate_fallback_config())
+
+ def test_generate_fallback_finds_dormant_eth_with_mac(self):
+ """generate_fallback_config finds any dormant device with a mac."""
+ write_file(os.path.join(self.sysdir, "eth0", "dormant"), "1")
+ mac = "aa:bb:cc:aa:bb:cc"
+ write_file(os.path.join(self.sysdir, "eth0", "address"), mac)
+ expected = {
+ "ethernets": {
+ "eth0": {
+ "match": {"macaddress": mac},
+ "dhcp4": True,
+ "set-name": "eth0",
+ }
+ },
+ "version": 2,
+ }
+ self.assertEqual(expected, net.generate_fallback_config())
+
+ def test_generate_fallback_finds_eth_by_operstate(self):
+ """generate_fallback_config finds any dormant device with a mac."""
+ mac = "aa:bb:cc:aa:bb:cc"
+ write_file(os.path.join(self.sysdir, "eth0", "address"), mac)
+ expected = {
+ "ethernets": {
+ "eth0": {
+ "dhcp4": True,
+ "match": {"macaddress": mac},
+ "set-name": "eth0",
+ }
+ },
+ "version": 2,
+ }
+ valid_operstates = ["dormant", "down", "lowerlayerdown", "unknown"]
+ for state in valid_operstates:
+ write_file(os.path.join(self.sysdir, "eth0", "operstate"), state)
+ self.assertEqual(expected, net.generate_fallback_config())
+ write_file(os.path.join(self.sysdir, "eth0", "operstate"), "noworky")
+ self.assertIsNone(net.generate_fallback_config())
+
+ def test_generate_fallback_config_skips_veth(self):
+ """generate_fallback_config will skip any veth interfaces."""
+ # A connected veth which gets ignored
+ write_file(os.path.join(self.sysdir, "veth0", "carrier"), "1")
+ self.assertIsNone(net.generate_fallback_config())
+
+ def test_generate_fallback_config_skips_bridges(self):
+ """generate_fallback_config will skip any bridges interfaces."""
+ # A connected bridge which gets ignored
+ write_file(os.path.join(self.sysdir, "eth0", "carrier"), "1")
+ mac = "aa:bb:cc:aa:bb:cc"
+ write_file(os.path.join(self.sysdir, "eth0", "address"), mac)
+ ensure_file(os.path.join(self.sysdir, "eth0", "bridge"))
+ self.assertIsNone(net.generate_fallback_config())
+
+ def test_generate_fallback_config_skips_bonds(self):
+ """generate_fallback_config will skip any bonded interfaces."""
+ # A connected bond which gets ignored
+ write_file(os.path.join(self.sysdir, "eth0", "carrier"), "1")
+ mac = "aa:bb:cc:aa:bb:cc"
+ write_file(os.path.join(self.sysdir, "eth0", "address"), mac)
+ ensure_file(os.path.join(self.sysdir, "eth0", "bonding"))
+ self.assertIsNone(net.generate_fallback_config())
+
+ def test_generate_fallback_config_skips_netfail_devs(self):
+ """gen_fallback_config ignores netfail primary,sby no mac on master."""
+ mac = "aa:bb:cc:aa:bb:cc" # netfailover devs share the same mac
+ for iface in ["ens3", "ens3sby", "enP0s1f3"]:
+ write_file(os.path.join(self.sysdir, iface, "carrier"), "1")
+ write_file(
+ os.path.join(self.sysdir, iface, "addr_assign_type"), "0"
+ )
+ write_file(os.path.join(self.sysdir, iface, "address"), mac)
+
+ def is_netfail(iface, _driver=None):
+ # ens3 is the master
+ if iface == "ens3":
+ return False
+ return True
+
+ self.m_netfail.side_effect = is_netfail
+
+ def is_netfail_master(iface, _driver=None):
+ # ens3 is the master
+ if iface == "ens3":
+ return True
+ return False
+
+ self.m_netfail_master.side_effect = is_netfail_master
+ expected = {
+ "ethernets": {
+ "ens3": {
+ "dhcp4": True,
+ "match": {"name": "ens3"},
+ "set-name": "ens3",
+ }
+ },
+ "version": 2,
+ }
+ result = net.generate_fallback_config()
+ self.assertEqual(expected, result)
+
+
+class TestNetFindFallBackNic(CiTestCase):
+ def setUp(self):
+ super(TestNetFindFallBackNic, self).setUp()
+ sys_mock = mock.patch("cloudinit.net.get_sys_class_path")
+ self.m_sys_path = sys_mock.start()
+ self.sysdir = self.tmp_dir() + "/"
+ self.m_sys_path.return_value = self.sysdir
+ self.addCleanup(sys_mock.stop)
+ self.add_patch(
+ "cloudinit.net.util.is_container",
+ "m_is_container",
+ return_value=False,
+ )
+ self.add_patch("cloudinit.net.util.udevadm_settle", "m_settle")
+
+ def test_generate_fallback_finds_first_connected_eth_with_mac(self):
+ """find_fallback_nic finds any connected device with a mac."""
+ write_file(os.path.join(self.sysdir, "eth0", "carrier"), "1")
+ write_file(os.path.join(self.sysdir, "eth1", "carrier"), "1")
+ mac = "aa:bb:cc:aa:bb:cc"
+ write_file(os.path.join(self.sysdir, "eth1", "address"), mac)
+ self.assertEqual("eth1", net.find_fallback_nic())
+
+
+class TestGetDeviceList(CiTestCase):
+ def setUp(self):
+ super(TestGetDeviceList, self).setUp()
+ sys_mock = mock.patch("cloudinit.net.get_sys_class_path")
+ self.m_sys_path = sys_mock.start()
+ self.sysdir = self.tmp_dir() + "/"
+ self.m_sys_path.return_value = self.sysdir
+ self.addCleanup(sys_mock.stop)
+
+ def test_get_devicelist_raise_oserror(self):
+ """get_devicelist raise any non-ENOENT OSerror."""
+ error = OSError("Can not do it")
+ error.errno = errno.EPERM # Set non-ENOENT
+ self.m_sys_path.side_effect = error
+ with self.assertRaises(OSError) as context_manager:
+ net.get_devicelist()
+ exception = context_manager.exception
+ self.assertEqual("Can not do it", str(exception))
+
+ def test_get_devicelist_empty_without_sys_net(self):
+ """get_devicelist returns empty list when missing SYS_CLASS_NET."""
+ self.m_sys_path.return_value = "idontexist"
+ self.assertEqual([], net.get_devicelist())
+
+ def test_get_devicelist_empty_with_no_devices_in_sys_net(self):
+ """get_devicelist returns empty directoty listing for SYS_CLASS_NET."""
+ self.assertEqual([], net.get_devicelist())
+
+ def test_get_devicelist_lists_any_subdirectories_in_sys_net(self):
+ """get_devicelist returns a directory listing for SYS_CLASS_NET."""
+ write_file(os.path.join(self.sysdir, "eth0", "operstate"), "up")
+ write_file(os.path.join(self.sysdir, "eth1", "operstate"), "up")
+ self.assertCountEqual(["eth0", "eth1"], net.get_devicelist())
+
+
+@mock.patch(
+ "cloudinit.net.is_openvswitch_internal_interface",
+ mock.Mock(return_value=False),
+)
+class TestGetInterfaceMAC(CiTestCase):
+ def setUp(self):
+ super(TestGetInterfaceMAC, self).setUp()
+ sys_mock = mock.patch("cloudinit.net.get_sys_class_path")
+ self.m_sys_path = sys_mock.start()
+ self.sysdir = self.tmp_dir() + "/"
+ self.m_sys_path.return_value = self.sysdir
+ self.addCleanup(sys_mock.stop)
+
+ def test_get_interface_mac_false_with_no_mac(self):
+ """get_device_list returns False when no mac is reported."""
+ ensure_file(os.path.join(self.sysdir, "eth0", "bonding"))
+ mac_path = os.path.join(self.sysdir, "eth0", "address")
+ self.assertFalse(os.path.exists(mac_path))
+ self.assertFalse(net.get_interface_mac("eth0"))
+
+ def test_get_interface_mac(self):
+ """get_interfaces returns the mac from SYS_CLASS_NET/dev/address."""
+ mac = "aa:bb:cc:aa:bb:cc"
+ write_file(os.path.join(self.sysdir, "eth1", "address"), mac)
+ self.assertEqual(mac, net.get_interface_mac("eth1"))
+
+ def test_get_interface_mac_grabs_bonding_address(self):
+ """get_interfaces returns the source device mac for bonded devices."""
+ source_dev_mac = "aa:bb:cc:aa:bb:cc"
+ bonded_mac = "dd:ee:ff:dd:ee:ff"
+ write_file(os.path.join(self.sysdir, "eth1", "address"), bonded_mac)
+ write_file(
+ os.path.join(self.sysdir, "eth1", "bonding_slave", "perm_hwaddr"),
+ source_dev_mac,
+ )
+ self.assertEqual(source_dev_mac, net.get_interface_mac("eth1"))
+
+ def test_get_interfaces_empty_list_without_sys_net(self):
+ """get_interfaces returns an empty list when missing SYS_CLASS_NET."""
+ self.m_sys_path.return_value = "idontexist"
+ self.assertEqual([], net.get_interfaces())
+
+ def test_get_interfaces_by_mac_skips_empty_mac(self):
+ """Ignore 00:00:00:00:00:00 addresses from get_interfaces_by_mac."""
+ empty_mac = "00:00:00:00:00:00"
+ mac = "aa:bb:cc:aa:bb:cc"
+ write_file(os.path.join(self.sysdir, "eth1", "address"), empty_mac)
+ write_file(os.path.join(self.sysdir, "eth1", "addr_assign_type"), "0")
+ write_file(os.path.join(self.sysdir, "eth2", "addr_assign_type"), "0")
+ write_file(os.path.join(self.sysdir, "eth2", "address"), mac)
+ expected = [("eth2", "aa:bb:cc:aa:bb:cc", None, None)]
+ self.assertEqual(expected, net.get_interfaces())
+
+ def test_get_interfaces_by_mac_skips_missing_mac(self):
+ """Ignore interfaces without an address from get_interfaces_by_mac."""
+ write_file(os.path.join(self.sysdir, "eth1", "addr_assign_type"), "0")
+ address_path = os.path.join(self.sysdir, "eth1", "address")
+ self.assertFalse(os.path.exists(address_path))
+ mac = "aa:bb:cc:aa:bb:cc"
+ write_file(os.path.join(self.sysdir, "eth2", "addr_assign_type"), "0")
+ write_file(os.path.join(self.sysdir, "eth2", "address"), mac)
+ expected = [("eth2", "aa:bb:cc:aa:bb:cc", None, None)]
+ self.assertEqual(expected, net.get_interfaces())
+
+ def test_get_interfaces_by_mac_skips_master_devs(self):
+ """Ignore interfaces with a master device which would have dup mac."""
+ mac1 = mac2 = "aa:bb:cc:aa:bb:cc"
+ write_file(os.path.join(self.sysdir, "eth1", "addr_assign_type"), "0")
+ write_file(os.path.join(self.sysdir, "eth1", "address"), mac1)
+ write_file(os.path.join(self.sysdir, "eth1", "master"), "blah")
+ write_file(os.path.join(self.sysdir, "eth2", "addr_assign_type"), "0")
+ write_file(os.path.join(self.sysdir, "eth2", "address"), mac2)
+ expected = [("eth2", mac2, None, None)]
+ self.assertEqual(expected, net.get_interfaces())
+
+ @mock.patch("cloudinit.net.is_netfailover")
+ def test_get_interfaces_by_mac_skips_netfailover(self, m_netfail):
+ """Ignore interfaces if netfailover primary or standby."""
+ mac = "aa:bb:cc:aa:bb:cc" # netfailover devs share the same mac
+ for iface in ["ens3", "ens3sby", "enP0s1f3"]:
+ write_file(
+ os.path.join(self.sysdir, iface, "addr_assign_type"), "0"
+ )
+ write_file(os.path.join(self.sysdir, iface, "address"), mac)
+
+ def is_netfail(iface, _driver=None):
+ # ens3 is the master
+ if iface == "ens3":
+ return False
+ else:
+ return True
+
+ m_netfail.side_effect = is_netfail
+ expected = [("ens3", mac, None, None)]
+ self.assertEqual(expected, net.get_interfaces())
+
+ def test_get_interfaces_does_not_skip_phys_members_of_bridges_and_bonds(
+ self,
+ ):
+ bridge_mac = "aa:bb:cc:aa:bb:cc"
+ bond_mac = "cc:bb:aa:cc:bb:aa"
+ ovs_mac = "bb:cc:aa:bb:cc:aa"
+
+ write_file(os.path.join(self.sysdir, "br0", "address"), bridge_mac)
+ write_file(os.path.join(self.sysdir, "br0", "bridge"), "")
+
+ write_file(os.path.join(self.sysdir, "bond0", "address"), bond_mac)
+ write_file(os.path.join(self.sysdir, "bond0", "bonding"), "")
+
+ write_file(os.path.join(self.sysdir, "ovs-system", "address"), ovs_mac)
+
+ write_file(os.path.join(self.sysdir, "eth1", "address"), bridge_mac)
+ os.symlink("../br0", os.path.join(self.sysdir, "eth1", "master"))
+
+ write_file(os.path.join(self.sysdir, "eth2", "address"), bond_mac)
+ os.symlink("../bond0", os.path.join(self.sysdir, "eth2", "master"))
+
+ write_file(os.path.join(self.sysdir, "eth3", "address"), ovs_mac)
+ os.symlink(
+ "../ovs-system", os.path.join(self.sysdir, "eth3", "master")
+ )
+ os.symlink(
+ "../ovs-system",
+ os.path.join(self.sysdir, "eth3", "upper_ovs-system"),
+ )
+
+ interface_names = [interface[0] for interface in net.get_interfaces()]
+ self.assertEqual(
+ ["eth1", "eth2", "eth3", "ovs-system"], sorted(interface_names)
+ )
+
+
+class TestInterfaceHasOwnMAC(CiTestCase):
+ def setUp(self):
+ super(TestInterfaceHasOwnMAC, self).setUp()
+ sys_mock = mock.patch("cloudinit.net.get_sys_class_path")
+ self.m_sys_path = sys_mock.start()
+ self.sysdir = self.tmp_dir() + "/"
+ self.m_sys_path.return_value = self.sysdir
+ self.addCleanup(sys_mock.stop)
+
+ def test_interface_has_own_mac_false_when_stolen(self):
+ """Return False from interface_has_own_mac when address is stolen."""
+ write_file(os.path.join(self.sysdir, "eth1", "addr_assign_type"), "2")
+ self.assertFalse(net.interface_has_own_mac("eth1"))
+
+ def test_interface_has_own_mac_true_when_not_stolen(self):
+ """Return False from interface_has_own_mac when mac isn't stolen."""
+ valid_assign_types = ["0", "1", "3"]
+ assign_path = os.path.join(self.sysdir, "eth1", "addr_assign_type")
+ for _type in valid_assign_types:
+ write_file(assign_path, _type)
+ self.assertTrue(net.interface_has_own_mac("eth1"))
+
+ def test_interface_has_own_mac_strict_errors_on_absent_assign_type(self):
+ """When addr_assign_type is absent, interface_has_own_mac errors."""
+ with self.assertRaises(ValueError):
+ net.interface_has_own_mac("eth1", strict=True)
+
+
+@mock.patch("cloudinit.net.subp.subp")
+class TestEphemeralIPV4Network(CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestEphemeralIPV4Network, self).setUp()
+ sys_mock = mock.patch("cloudinit.net.get_sys_class_path")
+ self.m_sys_path = sys_mock.start()
+ self.sysdir = self.tmp_dir() + "/"
+ self.m_sys_path.return_value = self.sysdir
+ self.addCleanup(sys_mock.stop)
+
+ def test_ephemeral_ipv4_network_errors_on_missing_params(self, m_subp):
+ """No required params for EphemeralIPv4Network can be None."""
+ required_params = {
+ "interface": "eth0",
+ "ip": "192.168.2.2",
+ "prefix_or_mask": "255.255.255.0",
+ "broadcast": "192.168.2.255",
+ }
+ for key in required_params.keys():
+ params = copy.deepcopy(required_params)
+ params[key] = None
+ with self.assertRaises(ValueError) as context_manager:
+ net.EphemeralIPv4Network(**params)
+ error = context_manager.exception
+ self.assertIn("Cannot init network on", str(error))
+ self.assertEqual(0, m_subp.call_count)
+
+ def test_ephemeral_ipv4_network_errors_invalid_mask_prefix(self, m_subp):
+ """Raise an error when prefix_or_mask is not a netmask or prefix."""
+ params = {
+ "interface": "eth0",
+ "ip": "192.168.2.2",
+ "broadcast": "192.168.2.255",
+ }
+ invalid_masks = ("invalid", "invalid.", "123.123.123")
+ for error_val in invalid_masks:
+ params["prefix_or_mask"] = error_val
+ with self.assertRaises(ValueError) as context_manager:
+ with net.EphemeralIPv4Network(**params):
+ pass
+ error = context_manager.exception
+ self.assertIn(
+ "Cannot setup network, invalid prefix or netmask: ", str(error)
+ )
+ self.assertEqual(0, m_subp.call_count)
+
+ def test_ephemeral_ipv4_network_performs_teardown(self, m_subp):
+ """EphemeralIPv4Network performs teardown on the device if setup."""
+ expected_setup_calls = [
+ mock.call(
+ [
+ "ip",
+ "-family",
+ "inet",
+ "addr",
+ "add",
+ "192.168.2.2/24",
+ "broadcast",
+ "192.168.2.255",
+ "dev",
+ "eth0",
+ ],
+ capture=True,
+ update_env={"LANG": "C"},
+ ),
+ mock.call(
+ ["ip", "-family", "inet", "link", "set", "dev", "eth0", "up"],
+ capture=True,
+ ),
+ ]
+ expected_teardown_calls = [
+ mock.call(
+ [
+ "ip",
+ "-family",
+ "inet",
+ "link",
+ "set",
+ "dev",
+ "eth0",
+ "down",
+ ],
+ capture=True,
+ ),
+ mock.call(
+ [
+ "ip",
+ "-family",
+ "inet",
+ "addr",
+ "del",
+ "192.168.2.2/24",
+ "dev",
+ "eth0",
+ ],
+ capture=True,
+ ),
+ ]
+ params = {
+ "interface": "eth0",
+ "ip": "192.168.2.2",
+ "prefix_or_mask": "255.255.255.0",
+ "broadcast": "192.168.2.255",
+ }
+ with net.EphemeralIPv4Network(**params):
+ self.assertEqual(expected_setup_calls, m_subp.call_args_list)
+ m_subp.assert_has_calls(expected_teardown_calls)
+
+ @mock.patch("cloudinit.net.readurl")
+ def test_ephemeral_ipv4_no_network_if_url_connectivity(
+ self, m_readurl, m_subp
+ ):
+ """No network setup is performed if we can successfully connect to
+ connectivity_url."""
+ params = {
+ "interface": "eth0",
+ "ip": "192.168.2.2",
+ "prefix_or_mask": "255.255.255.0",
+ "broadcast": "192.168.2.255",
+ "connectivity_url_data": {"url": "http://example.org/index.html"},
+ }
+
+ with net.EphemeralIPv4Network(**params):
+ self.assertEqual(
+ [mock.call(url="http://example.org/index.html", timeout=5)],
+ m_readurl.call_args_list,
+ )
+ # Ensure that no network setup or teardown happens:
+ m_subp.assert_not_called()
+
+ def test_ephemeral_ipv4_network_noop_when_configured(self, m_subp):
+ """EphemeralIPv4Network handles exception when address is setup.
+
+ It performs no cleanup as the interface was already setup.
+ """
+ params = {
+ "interface": "eth0",
+ "ip": "192.168.2.2",
+ "prefix_or_mask": "255.255.255.0",
+ "broadcast": "192.168.2.255",
+ }
+ m_subp.side_effect = ProcessExecutionError(
+ "", "RTNETLINK answers: File exists", 2
+ )
+ expected_calls = [
+ mock.call(
+ [
+ "ip",
+ "-family",
+ "inet",
+ "addr",
+ "add",
+ "192.168.2.2/24",
+ "broadcast",
+ "192.168.2.255",
+ "dev",
+ "eth0",
+ ],
+ capture=True,
+ update_env={"LANG": "C"},
+ )
+ ]
+ with net.EphemeralIPv4Network(**params):
+ pass
+ self.assertEqual(expected_calls, m_subp.call_args_list)
+ self.assertIn(
+ "Skip ephemeral network setup, eth0 already has address",
+ self.logs.getvalue(),
+ )
+
+ def test_ephemeral_ipv4_network_with_prefix(self, m_subp):
+ """EphemeralIPv4Network takes a valid prefix to setup the network."""
+ params = {
+ "interface": "eth0",
+ "ip": "192.168.2.2",
+ "prefix_or_mask": "24",
+ "broadcast": "192.168.2.255",
+ }
+ for prefix_val in ["24", 16]: # prefix can be int or string
+ params["prefix_or_mask"] = prefix_val
+ with net.EphemeralIPv4Network(**params):
+ pass
+ m_subp.assert_has_calls(
+ [
+ mock.call(
+ [
+ "ip",
+ "-family",
+ "inet",
+ "addr",
+ "add",
+ "192.168.2.2/24",
+ "broadcast",
+ "192.168.2.255",
+ "dev",
+ "eth0",
+ ],
+ capture=True,
+ update_env={"LANG": "C"},
+ )
+ ]
+ )
+ m_subp.assert_has_calls(
+ [
+ mock.call(
+ [
+ "ip",
+ "-family",
+ "inet",
+ "addr",
+ "add",
+ "192.168.2.2/16",
+ "broadcast",
+ "192.168.2.255",
+ "dev",
+ "eth0",
+ ],
+ capture=True,
+ update_env={"LANG": "C"},
+ )
+ ]
+ )
+
+ def test_ephemeral_ipv4_network_with_new_default_route(self, m_subp):
+ """Add the route when router is set and no default route exists."""
+ params = {
+ "interface": "eth0",
+ "ip": "192.168.2.2",
+ "prefix_or_mask": "255.255.255.0",
+ "broadcast": "192.168.2.255",
+ "router": "192.168.2.1",
+ }
+ m_subp.return_value = "", "" # Empty response from ip route gw check
+ expected_setup_calls = [
+ mock.call(
+ [
+ "ip",
+ "-family",
+ "inet",
+ "addr",
+ "add",
+ "192.168.2.2/24",
+ "broadcast",
+ "192.168.2.255",
+ "dev",
+ "eth0",
+ ],
+ capture=True,
+ update_env={"LANG": "C"},
+ ),
+ mock.call(
+ ["ip", "-family", "inet", "link", "set", "dev", "eth0", "up"],
+ capture=True,
+ ),
+ mock.call(["ip", "route", "show", "0.0.0.0/0"], capture=True),
+ mock.call(
+ [
+ "ip",
+ "-4",
+ "route",
+ "add",
+ "192.168.2.1",
+ "dev",
+ "eth0",
+ "src",
+ "192.168.2.2",
+ ],
+ capture=True,
+ ),
+ mock.call(
+ [
+ "ip",
+ "-4",
+ "route",
+ "add",
+ "default",
+ "via",
+ "192.168.2.1",
+ "dev",
+ "eth0",
+ ],
+ capture=True,
+ ),
+ ]
+ expected_teardown_calls = [
+ mock.call(
+ ["ip", "-4", "route", "del", "default", "dev", "eth0"],
+ capture=True,
+ ),
+ mock.call(
+ [
+ "ip",
+ "-4",
+ "route",
+ "del",
+ "192.168.2.1",
+ "dev",
+ "eth0",
+ "src",
+ "192.168.2.2",
+ ],
+ capture=True,
+ ),
+ ]
+
+ with net.EphemeralIPv4Network(**params):
+ self.assertEqual(expected_setup_calls, m_subp.call_args_list)
+ m_subp.assert_has_calls(expected_teardown_calls)
+
+ def test_ephemeral_ipv4_network_with_rfc3442_static_routes(self, m_subp):
+ params = {
+ "interface": "eth0",
+ "ip": "192.168.2.2",
+ "prefix_or_mask": "255.255.255.255",
+ "broadcast": "192.168.2.255",
+ "static_routes": [
+ ("192.168.2.1/32", "0.0.0.0"),
+ ("169.254.169.254/32", "192.168.2.1"),
+ ("0.0.0.0/0", "192.168.2.1"),
+ ],
+ "router": "192.168.2.1",
+ }
+ expected_setup_calls = [
+ mock.call(
+ [
+ "ip",
+ "-family",
+ "inet",
+ "addr",
+ "add",
+ "192.168.2.2/32",
+ "broadcast",
+ "192.168.2.255",
+ "dev",
+ "eth0",
+ ],
+ capture=True,
+ update_env={"LANG": "C"},
+ ),
+ mock.call(
+ ["ip", "-family", "inet", "link", "set", "dev", "eth0", "up"],
+ capture=True,
+ ),
+ mock.call(
+ [
+ "ip",
+ "-4",
+ "route",
+ "append",
+ "192.168.2.1/32",
+ "dev",
+ "eth0",
+ ],
+ capture=True,
+ ),
+ mock.call(
+ [
+ "ip",
+ "-4",
+ "route",
+ "append",
+ "169.254.169.254/32",
+ "via",
+ "192.168.2.1",
+ "dev",
+ "eth0",
+ ],
+ capture=True,
+ ),
+ mock.call(
+ [
+ "ip",
+ "-4",
+ "route",
+ "append",
+ "0.0.0.0/0",
+ "via",
+ "192.168.2.1",
+ "dev",
+ "eth0",
+ ],
+ capture=True,
+ ),
+ ]
+ expected_teardown_calls = [
+ mock.call(
+ [
+ "ip",
+ "-4",
+ "route",
+ "del",
+ "0.0.0.0/0",
+ "via",
+ "192.168.2.1",
+ "dev",
+ "eth0",
+ ],
+ capture=True,
+ ),
+ mock.call(
+ [
+ "ip",
+ "-4",
+ "route",
+ "del",
+ "169.254.169.254/32",
+ "via",
+ "192.168.2.1",
+ "dev",
+ "eth0",
+ ],
+ capture=True,
+ ),
+ mock.call(
+ ["ip", "-4", "route", "del", "192.168.2.1/32", "dev", "eth0"],
+ capture=True,
+ ),
+ mock.call(
+ [
+ "ip",
+ "-family",
+ "inet",
+ "link",
+ "set",
+ "dev",
+ "eth0",
+ "down",
+ ],
+ capture=True,
+ ),
+ mock.call(
+ [
+ "ip",
+ "-family",
+ "inet",
+ "addr",
+ "del",
+ "192.168.2.2/32",
+ "dev",
+ "eth0",
+ ],
+ capture=True,
+ ),
+ ]
+ with net.EphemeralIPv4Network(**params):
+ self.assertEqual(expected_setup_calls, m_subp.call_args_list)
+ m_subp.assert_has_calls(expected_setup_calls + expected_teardown_calls)
+
+
+class TestApplyNetworkCfgNames(CiTestCase):
+ V1_CONFIG = textwrap.dedent(
+ """\
+ version: 1
+ config:
+ - type: physical
+ name: interface0
+ mac_address: "52:54:00:12:34:00"
+ subnets:
+ - type: static
+ address: 10.0.2.15
+ netmask: 255.255.255.0
+ gateway: 10.0.2.2
+ """
+ )
+ V2_CONFIG = textwrap.dedent(
+ """\
+ version: 2
+ ethernets:
+ interface0:
+ match:
+ macaddress: "52:54:00:12:34:00"
+ addresses:
+ - 10.0.2.15/24
+ gateway4: 10.0.2.2
+ set-name: interface0
+ """
+ )
+
+ V2_CONFIG_NO_SETNAME = textwrap.dedent(
+ """\
+ version: 2
+ ethernets:
+ interface0:
+ match:
+ macaddress: "52:54:00:12:34:00"
+ addresses:
+ - 10.0.2.15/24
+ gateway4: 10.0.2.2
+ """
+ )
+
+ V2_CONFIG_NO_MAC = textwrap.dedent(
+ """\
+ version: 2
+ ethernets:
+ interface0:
+ match:
+ driver: virtio-net
+ addresses:
+ - 10.0.2.15/24
+ gateway4: 10.0.2.2
+ set-name: interface0
+ """
+ )
+
+ @mock.patch("cloudinit.net.device_devid")
+ @mock.patch("cloudinit.net.device_driver")
+ @mock.patch("cloudinit.net._rename_interfaces")
+ def test_apply_v1_renames(
+ self, m_rename_interfaces, m_device_driver, m_device_devid
+ ):
+ m_device_driver.return_value = "virtio_net"
+ m_device_devid.return_value = "0x15d8"
+
+ net.apply_network_config_names(yaml.load(self.V1_CONFIG))
+
+ call = ["52:54:00:12:34:00", "interface0", "virtio_net", "0x15d8"]
+ m_rename_interfaces.assert_called_with([call])
+
+ @mock.patch("cloudinit.net.device_devid")
+ @mock.patch("cloudinit.net.device_driver")
+ @mock.patch("cloudinit.net._rename_interfaces")
+ def test_apply_v2_renames(
+ self, m_rename_interfaces, m_device_driver, m_device_devid
+ ):
+ m_device_driver.return_value = "virtio_net"
+ m_device_devid.return_value = "0x15d8"
+
+ net.apply_network_config_names(yaml.load(self.V2_CONFIG))
+
+ call = ["52:54:00:12:34:00", "interface0", "virtio_net", "0x15d8"]
+ m_rename_interfaces.assert_called_with([call])
+
+ @mock.patch("cloudinit.net._rename_interfaces")
+ def test_apply_v2_renames_skips_without_setname(self, m_rename_interfaces):
+ net.apply_network_config_names(yaml.load(self.V2_CONFIG_NO_SETNAME))
+ m_rename_interfaces.assert_called_with([])
+
+ @mock.patch("cloudinit.net._rename_interfaces")
+ def test_apply_v2_renames_skips_without_mac(self, m_rename_interfaces):
+ net.apply_network_config_names(yaml.load(self.V2_CONFIG_NO_MAC))
+ m_rename_interfaces.assert_called_with([])
+
+ def test_apply_v2_renames_raises_runtime_error_on_unknown_version(self):
+ with self.assertRaises(RuntimeError):
+ net.apply_network_config_names(yaml.load("version: 3"))
+
+
+class TestHasURLConnectivity(HttprettyTestCase):
+ def setUp(self):
+ super(TestHasURLConnectivity, self).setUp()
+ self.url = "http://fake/"
+ self.kwargs = {"allow_redirects": True, "timeout": 5.0}
+
+ @mock.patch("cloudinit.net.readurl")
+ def test_url_timeout_on_connectivity_check(self, m_readurl):
+ """A timeout of 5 seconds is provided when reading a url."""
+ self.assertTrue(
+ net.has_url_connectivity({"url": self.url}),
+ "Expected True on url connect",
+ )
+
+ def test_true_on_url_connectivity_success(self):
+ httpretty.register_uri(httpretty.GET, self.url)
+ self.assertTrue(
+ net.has_url_connectivity({"url": self.url}),
+ "Expected True on url connect",
+ )
+
+ @mock.patch("requests.Session.request")
+    def test_false_on_url_connectivity_timeout(self, m_request):
+ """A timeout raised accessing the url will return False."""
+ m_request.side_effect = requests.Timeout("Fake Connection Timeout")
+ self.assertFalse(
+ net.has_url_connectivity({"url": self.url}),
+ "Expected False on url timeout",
+ )
+
+    def test_false_on_url_connectivity_failure(self):
+ httpretty.register_uri(httpretty.GET, self.url, body={}, status=404)
+ self.assertFalse(
+ net.has_url_connectivity({"url": self.url}),
+ "Expected False on url fail",
+ )
+
+
+def _mk_v1_phys(mac, name, driver, device_id):
+ v1_cfg = {"type": "physical", "name": name, "mac_address": mac}
+ params = {}
+ if driver:
+ params.update({"driver": driver})
+ if device_id:
+ params.update({"device_id": device_id})
+
+ if params:
+ v1_cfg.update({"params": params})
+
+ return v1_cfg
+
+
+def _mk_v2_phys(mac, name, driver=None, device_id=None):
+ v2_cfg = {"set-name": name, "match": {"macaddress": mac}}
+ if driver:
+ v2_cfg["match"].update({"driver": driver})
+ if device_id:
+ v2_cfg["match"].update({"device_id": device_id})
+
+ return v2_cfg
+
+
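+# For reference, the helpers above produce e.g.:
+#   _mk_v1_phys("aa:bb:cc:dd:ee:ff", "eth0", "virtio", "0x1000") ->
+#       {"type": "physical", "name": "eth0",
+#        "mac_address": "aa:bb:cc:dd:ee:ff",
+#        "params": {"driver": "virtio", "device_id": "0x1000"}}
+#   _mk_v2_phys("aa:bb:cc:dd:ee:ff", "eth0", driver="virtio") ->
+#       {"set-name": "eth0",
+#        "match": {"macaddress": "aa:bb:cc:dd:ee:ff", "driver": "virtio"}}
+# i.e. the v1/v2 physical device shapes consumed by net.extract_physdevs().
+
+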
+class TestExtractPhysdevs(CiTestCase):
+ def setUp(self):
+ super(TestExtractPhysdevs, self).setUp()
+ self.add_patch("cloudinit.net.device_driver", "m_driver")
+ self.add_patch("cloudinit.net.device_devid", "m_devid")
+
+ def test_extract_physdevs_looks_up_driver_v1(self):
+ driver = "virtio"
+ self.m_driver.return_value = driver
+ physdevs = [
+ ["aa:bb:cc:dd:ee:ff", "eth0", None, "0x1000"],
+ ]
+ netcfg = {
+ "version": 1,
+ "config": [_mk_v1_phys(*args) for args in physdevs],
+ }
+ # insert the driver value for verification
+ physdevs[0][2] = driver
+ self.assertEqual(
+ sorted(physdevs), sorted(net.extract_physdevs(netcfg))
+ )
+ self.m_driver.assert_called_with("eth0")
+
+ def test_extract_physdevs_looks_up_driver_v2(self):
+ driver = "virtio"
+ self.m_driver.return_value = driver
+ physdevs = [
+ ["aa:bb:cc:dd:ee:ff", "eth0", None, "0x1000"],
+ ]
+ netcfg = {
+ "version": 2,
+ "ethernets": {args[1]: _mk_v2_phys(*args) for args in physdevs},
+ }
+ # insert the driver value for verification
+ physdevs[0][2] = driver
+ self.assertEqual(
+ sorted(physdevs), sorted(net.extract_physdevs(netcfg))
+ )
+ self.m_driver.assert_called_with("eth0")
+
+ def test_extract_physdevs_looks_up_devid_v1(self):
+ devid = "0x1000"
+ self.m_devid.return_value = devid
+ physdevs = [
+ ["aa:bb:cc:dd:ee:ff", "eth0", "virtio", None],
+ ]
+ netcfg = {
+ "version": 1,
+ "config": [_mk_v1_phys(*args) for args in physdevs],
+ }
+        # insert the devid value for verification
+ physdevs[0][3] = devid
+ self.assertEqual(
+ sorted(physdevs), sorted(net.extract_physdevs(netcfg))
+ )
+ self.m_devid.assert_called_with("eth0")
+
+ def test_extract_physdevs_looks_up_devid_v2(self):
+ devid = "0x1000"
+ self.m_devid.return_value = devid
+ physdevs = [
+ ["aa:bb:cc:dd:ee:ff", "eth0", "virtio", None],
+ ]
+ netcfg = {
+ "version": 2,
+ "ethernets": {args[1]: _mk_v2_phys(*args) for args in physdevs},
+ }
+        # insert the devid value for verification
+ physdevs[0][3] = devid
+ self.assertEqual(
+ sorted(physdevs), sorted(net.extract_physdevs(netcfg))
+ )
+ self.m_devid.assert_called_with("eth0")
+
+ def test_get_v1_type_physical(self):
+ physdevs = [
+ ["aa:bb:cc:dd:ee:ff", "eth0", "virtio", "0x1000"],
+ ["00:11:22:33:44:55", "ens3", "e1000", "0x1643"],
+ ["09:87:65:43:21:10", "ens0p1", "mlx4_core", "0:0:1000"],
+ ]
+ netcfg = {
+ "version": 1,
+ "config": [_mk_v1_phys(*args) for args in physdevs],
+ }
+ self.assertEqual(
+ sorted(physdevs), sorted(net.extract_physdevs(netcfg))
+ )
+
+ def test_get_v2_type_physical(self):
+ physdevs = [
+ ["aa:bb:cc:dd:ee:ff", "eth0", "virtio", "0x1000"],
+ ["00:11:22:33:44:55", "ens3", "e1000", "0x1643"],
+ ["09:87:65:43:21:10", "ens0p1", "mlx4_core", "0:0:1000"],
+ ]
+ netcfg = {
+ "version": 2,
+ "ethernets": {args[1]: _mk_v2_phys(*args) for args in physdevs},
+ }
+ self.assertEqual(
+ sorted(physdevs), sorted(net.extract_physdevs(netcfg))
+ )
+
+ def test_get_v2_type_physical_skips_if_no_set_name(self):
+ netcfg = {
+ "version": 2,
+ "ethernets": {
+ "ens3": {
+ "match": {"macaddress": "00:11:22:33:44:55"},
+ }
+ },
+ }
+ self.assertEqual([], net.extract_physdevs(netcfg))
+
+ def test_runtime_error_on_unknown_netcfg_version(self):
+ with self.assertRaises(RuntimeError):
+ net.extract_physdevs({"version": 3, "awesome_config": []})
+
+
+class TestNetFailOver(CiTestCase):
+ def setUp(self):
+ super(TestNetFailOver, self).setUp()
+ self.add_patch("cloudinit.net.util", "m_util")
+ self.add_patch("cloudinit.net.read_sys_net", "m_read_sys_net")
+ self.add_patch("cloudinit.net.device_driver", "m_device_driver")
+
+ def test_get_dev_features(self):
+ devname = self.random_string()
+ features = self.random_string()
+ self.m_read_sys_net.return_value = features
+
+ self.assertEqual(features, net.get_dev_features(devname))
+ self.assertEqual(1, self.m_read_sys_net.call_count)
+ self.assertEqual(
+ mock.call(devname, "device/features"),
+ self.m_read_sys_net.call_args_list[0],
+ )
+
+ def test_get_dev_features_none_returns_empty_string(self):
+ devname = self.random_string()
+ self.m_read_sys_net.side_effect = Exception("error")
+ self.assertEqual("", net.get_dev_features(devname))
+ self.assertEqual(1, self.m_read_sys_net.call_count)
+ self.assertEqual(
+ mock.call(devname, "device/features"),
+ self.m_read_sys_net.call_args_list[0],
+ )
+
+ @mock.patch("cloudinit.net.get_dev_features")
+ def test_has_netfail_standby_feature(self, m_dev_features):
+ devname = self.random_string()
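+        # device/features is exposed by sysfs as a bit string; set bit 62
+        # (VIRTIO_NET_F_STANDBY), which marks the netfail standby feature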
+ standby_features = ("0" * 62) + "1" + "0"
+ m_dev_features.return_value = standby_features
+ self.assertTrue(net.has_netfail_standby_feature(devname))
+
+ @mock.patch("cloudinit.net.get_dev_features")
+ def test_has_netfail_standby_feature_short_is_false(self, m_dev_features):
+ devname = self.random_string()
+ standby_features = self.random_string()
+ m_dev_features.return_value = standby_features
+ self.assertFalse(net.has_netfail_standby_feature(devname))
+
+ @mock.patch("cloudinit.net.get_dev_features")
+ def test_has_netfail_standby_feature_not_present_is_false(
+ self, m_dev_features
+ ):
+ devname = self.random_string()
+ standby_features = "0" * 64
+ m_dev_features.return_value = standby_features
+ self.assertFalse(net.has_netfail_standby_feature(devname))
+
+ @mock.patch("cloudinit.net.get_dev_features")
+ def test_has_netfail_standby_feature_no_features_is_false(
+ self, m_dev_features
+ ):
+ devname = self.random_string()
+ standby_features = None
+ m_dev_features.return_value = standby_features
+ self.assertFalse(net.has_netfail_standby_feature(devname))
+
+ @mock.patch("cloudinit.net.has_netfail_standby_feature")
+ @mock.patch("cloudinit.net.os.path.exists")
+ def test_is_netfail_master(self, m_exists, m_standby):
+ devname = self.random_string()
+ driver = "virtio_net"
+ m_exists.return_value = False # no master sysfs attr
+ m_standby.return_value = True # has standby feature flag
+ self.assertTrue(net.is_netfail_master(devname, driver))
+
+ @mock.patch("cloudinit.net.sys_dev_path")
+ def test_is_netfail_master_checks_master_attr(self, m_sysdev):
+ devname = self.random_string()
+ driver = "virtio_net"
+ m_sysdev.return_value = self.random_string()
+ self.assertFalse(net.is_netfail_master(devname, driver))
+ self.assertEqual(1, m_sysdev.call_count)
+ self.assertEqual(
+ mock.call(devname, path="master"), m_sysdev.call_args_list[0]
+ )
+
+ @mock.patch("cloudinit.net.has_netfail_standby_feature")
+ @mock.patch("cloudinit.net.os.path.exists")
+ def test_is_netfail_master_wrong_driver(self, m_exists, m_standby):
+ devname = self.random_string()
+ driver = self.random_string()
+ self.assertFalse(net.is_netfail_master(devname, driver))
+
+ @mock.patch("cloudinit.net.has_netfail_standby_feature")
+ @mock.patch("cloudinit.net.os.path.exists")
+ def test_is_netfail_master_has_master_attr(self, m_exists, m_standby):
+ devname = self.random_string()
+ driver = "virtio_net"
+ m_exists.return_value = True # has master sysfs attr
+ self.assertFalse(net.is_netfail_master(devname, driver))
+
+ @mock.patch("cloudinit.net.has_netfail_standby_feature")
+ @mock.patch("cloudinit.net.os.path.exists")
+ def test_is_netfail_master_no_standby_feat(self, m_exists, m_standby):
+ devname = self.random_string()
+ driver = "virtio_net"
+ m_exists.return_value = False # no master sysfs attr
+ m_standby.return_value = False # no standby feature flag
+ self.assertFalse(net.is_netfail_master(devname, driver))
+
+ @mock.patch("cloudinit.net.has_netfail_standby_feature")
+ @mock.patch("cloudinit.net.os.path.exists")
+ @mock.patch("cloudinit.net.sys_dev_path")
+ def test_is_netfail_primary(self, m_sysdev, m_exists, m_standby):
+ devname = self.random_string()
+ driver = self.random_string() # device not virtio_net
+ master_devname = self.random_string()
+ m_sysdev.return_value = "%s/%s" % (
+ self.random_string(),
+ master_devname,
+ )
+ m_exists.return_value = True # has master sysfs attr
+ self.m_device_driver.return_value = "virtio_net" # master virtio_net
+ m_standby.return_value = True # has standby feature flag
+ self.assertTrue(net.is_netfail_primary(devname, driver))
+ self.assertEqual(1, self.m_device_driver.call_count)
+ self.assertEqual(
+ mock.call(master_devname), self.m_device_driver.call_args_list[0]
+ )
+ self.assertEqual(1, m_standby.call_count)
+ self.assertEqual(
+ mock.call(master_devname), m_standby.call_args_list[0]
+ )
+
+ @mock.patch("cloudinit.net.has_netfail_standby_feature")
+ @mock.patch("cloudinit.net.os.path.exists")
+ @mock.patch("cloudinit.net.sys_dev_path")
+ def test_is_netfail_primary_wrong_driver(
+ self, m_sysdev, m_exists, m_standby
+ ):
+ devname = self.random_string()
+ driver = "virtio_net"
+ self.assertFalse(net.is_netfail_primary(devname, driver))
+
+ @mock.patch("cloudinit.net.has_netfail_standby_feature")
+ @mock.patch("cloudinit.net.os.path.exists")
+ @mock.patch("cloudinit.net.sys_dev_path")
+ def test_is_netfail_primary_no_master(self, m_sysdev, m_exists, m_standby):
+ devname = self.random_string()
+ driver = self.random_string() # device not virtio_net
+ m_exists.return_value = False # no master sysfs attr
+ self.assertFalse(net.is_netfail_primary(devname, driver))
+
+ @mock.patch("cloudinit.net.has_netfail_standby_feature")
+ @mock.patch("cloudinit.net.os.path.exists")
+ @mock.patch("cloudinit.net.sys_dev_path")
+ def test_is_netfail_primary_bad_master(
+ self, m_sysdev, m_exists, m_standby
+ ):
+ devname = self.random_string()
+ driver = self.random_string() # device not virtio_net
+ master_devname = self.random_string()
+ m_sysdev.return_value = "%s/%s" % (
+ self.random_string(),
+ master_devname,
+ )
+ m_exists.return_value = True # has master sysfs attr
+ self.m_device_driver.return_value = "XXXX" # master not virtio_net
+ self.assertFalse(net.is_netfail_primary(devname, driver))
+
+ @mock.patch("cloudinit.net.has_netfail_standby_feature")
+ @mock.patch("cloudinit.net.os.path.exists")
+ @mock.patch("cloudinit.net.sys_dev_path")
+ def test_is_netfail_primary_no_standby(
+ self, m_sysdev, m_exists, m_standby
+ ):
+ devname = self.random_string()
+ driver = self.random_string() # device not virtio_net
+ master_devname = self.random_string()
+ m_sysdev.return_value = "%s/%s" % (
+ self.random_string(),
+ master_devname,
+ )
+ m_exists.return_value = True # has master sysfs attr
+ self.m_device_driver.return_value = "virtio_net" # master virtio_net
+ m_standby.return_value = False # master has no standby feature flag
+ self.assertFalse(net.is_netfail_primary(devname, driver))
+
+ @mock.patch("cloudinit.net.has_netfail_standby_feature")
+ @mock.patch("cloudinit.net.os.path.exists")
+ def test_is_netfail_standby(self, m_exists, m_standby):
+ devname = self.random_string()
+ driver = "virtio_net"
+ m_exists.return_value = True # has master sysfs attr
+ m_standby.return_value = True # has standby feature flag
+ self.assertTrue(net.is_netfail_standby(devname, driver))
+
+ @mock.patch("cloudinit.net.has_netfail_standby_feature")
+ @mock.patch("cloudinit.net.os.path.exists")
+ def test_is_netfail_standby_wrong_driver(self, m_exists, m_standby):
+ devname = self.random_string()
+ driver = self.random_string()
+ self.assertFalse(net.is_netfail_standby(devname, driver))
+
+ @mock.patch("cloudinit.net.has_netfail_standby_feature")
+ @mock.patch("cloudinit.net.os.path.exists")
+ def test_is_netfail_standby_no_master(self, m_exists, m_standby):
+ devname = self.random_string()
+ driver = "virtio_net"
+        m_exists.return_value = False  # no master sysfs attr
+ self.assertFalse(net.is_netfail_standby(devname, driver))
+
+ @mock.patch("cloudinit.net.has_netfail_standby_feature")
+ @mock.patch("cloudinit.net.os.path.exists")
+ def test_is_netfail_standby_no_standby_feature(self, m_exists, m_standby):
+ devname = self.random_string()
+ driver = "virtio_net"
+ m_exists.return_value = True # has master sysfs attr
+        m_standby.return_value = False  # no standby feature flag
+ self.assertFalse(net.is_netfail_standby(devname, driver))
+
+ @mock.patch("cloudinit.net.is_netfail_standby")
+ @mock.patch("cloudinit.net.is_netfail_primary")
+ def test_is_netfailover_primary(self, m_primary, m_standby):
+ devname = self.random_string()
+ driver = self.random_string()
+ m_primary.return_value = True
+ m_standby.return_value = False
+ self.assertTrue(net.is_netfailover(devname, driver))
+
+ @mock.patch("cloudinit.net.is_netfail_standby")
+ @mock.patch("cloudinit.net.is_netfail_primary")
+ def test_is_netfailover_standby(self, m_primary, m_standby):
+ devname = self.random_string()
+ driver = self.random_string()
+ m_primary.return_value = False
+ m_standby.return_value = True
+ self.assertTrue(net.is_netfailover(devname, driver))
+
+ @mock.patch("cloudinit.net.is_netfail_standby")
+ @mock.patch("cloudinit.net.is_netfail_primary")
+ def test_is_netfailover_returns_false(self, m_primary, m_standby):
+ devname = self.random_string()
+ driver = self.random_string()
+ m_primary.return_value = False
+ m_standby.return_value = False
+ self.assertFalse(net.is_netfailover(devname, driver))
+
+
+class TestOpenvswitchIsInstalled:
+ """Test cloudinit.net.openvswitch_is_installed.
+
+ Uses the ``clear_lru_cache`` local autouse fixture to allow us to test
+ despite the ``lru_cache`` decorator on the unit under test.
+ """
+
+ @pytest.fixture(autouse=True)
+ def clear_lru_cache(self):
+ net.openvswitch_is_installed.cache_clear()
+
+ @pytest.mark.parametrize(
+ "expected,which_return", [(True, "/some/path"), (False, None)]
+ )
+ @mock.patch("cloudinit.net.subp.which")
+ def test_mirrors_which_result(self, m_which, expected, which_return):
+ m_which.return_value = which_return
+ assert expected == net.openvswitch_is_installed()
+
+ @mock.patch("cloudinit.net.subp.which")
+ def test_only_calls_which_once(self, m_which):
+ net.openvswitch_is_installed()
+ net.openvswitch_is_installed()
+ assert 1 == m_which.call_count
+
+
+@mock.patch("cloudinit.net.subp.subp", return_value=("", ""))
+class TestGetOVSInternalInterfaces:
+ """Test cloudinit.net.get_ovs_internal_interfaces.
+
+ Uses the ``clear_lru_cache`` local autouse fixture to allow us to test
+ despite the ``lru_cache`` decorator on the unit under test.
+ """
+
+ @pytest.fixture(autouse=True)
+ def clear_lru_cache(self):
+ net.get_ovs_internal_interfaces.cache_clear()
+
+ def test_command_used(self, m_subp):
+ """Test we use the correct command when we call subp"""
+ net.get_ovs_internal_interfaces()
+
+ assert [
+ mock.call(net.OVS_INTERNAL_INTERFACE_LOOKUP_CMD)
+ ] == m_subp.call_args_list
+
+ def test_subp_contents_split_and_returned(self, m_subp):
+ """Test that the command output is appropriately mangled."""
+ stdout = "iface1\niface2\niface3\n"
+ m_subp.return_value = (stdout, "")
+
+ assert [
+ "iface1",
+ "iface2",
+ "iface3",
+ ] == net.get_ovs_internal_interfaces()
+
+ def test_database_connection_error_handled_gracefully(self, m_subp):
+ """Test that the error indicating OVS is down is handled gracefully."""
+ m_subp.side_effect = ProcessExecutionError(
+ stderr="database connection failed"
+ )
+
+ assert [] == net.get_ovs_internal_interfaces()
+
+ def test_other_errors_raised(self, m_subp):
+ """Test that only database connection errors are handled."""
+ m_subp.side_effect = ProcessExecutionError()
+
+ with pytest.raises(ProcessExecutionError):
+ net.get_ovs_internal_interfaces()
+
+ def test_only_runs_once(self, m_subp):
+ """Test that we cache the value."""
+ net.get_ovs_internal_interfaces()
+ net.get_ovs_internal_interfaces()
+
+ assert 1 == m_subp.call_count
+
+
+@mock.patch("cloudinit.net.get_ovs_internal_interfaces")
+@mock.patch("cloudinit.net.openvswitch_is_installed")
+class TestIsOpenVSwitchInternalInterface:
+ def test_false_if_ovs_not_installed(
+ self, m_openvswitch_is_installed, _m_get_ovs_internal_interfaces
+ ):
+ """Test that OVS' absence returns False."""
+ m_openvswitch_is_installed.return_value = False
+
+ assert not net.is_openvswitch_internal_interface("devname")
+
+ @pytest.mark.parametrize(
+ "detected_interfaces,devname,expected_return",
+ [
+ ([], "devname", False),
+ (["notdevname"], "devname", False),
+ (["devname"], "devname", True),
+ (["some", "other", "devices", "and", "ours"], "ours", True),
+ ],
+ )
+ def test_return_value_based_on_detected_interfaces(
+ self,
+ m_openvswitch_is_installed,
+ m_get_ovs_internal_interfaces,
+ detected_interfaces,
+ devname,
+ expected_return,
+ ):
+ """Test that the detected interfaces are used correctly."""
+ m_openvswitch_is_installed.return_value = True
+ m_get_ovs_internal_interfaces.return_value = detected_interfaces
+ assert expected_return == net.is_openvswitch_internal_interface(
+ devname
+ )
+
+
+class TestIsIpAddress:
+ """Tests for net.is_ip_address.
+
+    Instead of testing with specific values, we rely on the ipaddress stdlib
+    module to handle all values correctly, so we simply test that
+    is_ip_address defers to the ipaddress module correctly.
+ """
+
+ @pytest.mark.parametrize(
+ "ip_address_side_effect,expected_return",
+ (
+ (ValueError, False),
+ (lambda _: ipaddress.IPv4Address("192.168.0.1"), True),
+ (lambda _: ipaddress.IPv6Address("2001:db8::"), True),
+ ),
+ )
+ def test_is_ip_address(self, ip_address_side_effect, expected_return):
+ with mock.patch(
+ "cloudinit.net.ipaddress.ip_address",
+ side_effect=ip_address_side_effect,
+ ) as m_ip_address:
+ ret = net.is_ip_address(mock.sentinel.ip_address_in)
+ assert expected_return == ret
+ expected_call = mock.call(mock.sentinel.ip_address_in)
+ assert [expected_call] == m_ip_address.call_args_list
+
+
+class TestIsIpv4Address:
+ """Tests for net.is_ipv4_address.
+
+    Instead of testing with specific values, we rely on the ipaddress stdlib
+    module to handle all values correctly, so we simply test that
+    is_ipv4_address defers to the ipaddress module correctly.
+ """
+
+ @pytest.mark.parametrize(
+ "ipv4address_mock,expected_return",
+ (
+ (mock.Mock(side_effect=ValueError), False),
+ (
+ mock.Mock(return_value=ipaddress.IPv4Address("192.168.0.1")),
+ True,
+ ),
+ ),
+ )
+    def test_is_ipv4_address(self, ipv4address_mock, expected_return):
+ with mock.patch(
+ "cloudinit.net.ipaddress.IPv4Address", ipv4address_mock
+ ) as m_ipv4address:
+ ret = net.is_ipv4_address(mock.sentinel.ip_address_in)
+ assert expected_return == ret
+ expected_call = mock.call(mock.sentinel.ip_address_in)
+ assert [expected_call] == m_ipv4address.call_args_list
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/net/test_network_state.py b/tests/unittests/net/test_network_state.py
new file mode 100644
index 00000000..471d969a
--- /dev/null
+++ b/tests/unittests/net/test_network_state.py
@@ -0,0 +1,222 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+import ipaddress
+from unittest import mock
+
+import pytest
+
+from cloudinit import safeyaml
+from cloudinit.net import network_state
+from tests.unittests.helpers import CiTestCase
+
+netstate_path = "cloudinit.net.network_state"
+
+
+_V1_CONFIG_NAMESERVERS = """\
+network:
+ version: 1
+ config:
+ - type: nameserver
+ interface: {iface}
+ address:
+ - 192.168.1.1
+ - 8.8.8.8
+ search:
+ - spam.local
+ - type: nameserver
+ address:
+ - 192.168.1.0
+ - 4.4.4.4
+ search:
+ - eggs.local
+ - type: physical
+ name: eth0
+ mac_address: '00:11:22:33:44:55'
+ - type: physical
+ name: eth1
+ mac_address: '66:77:88:99:00:11'
+"""
+
+V1_CONFIG_NAMESERVERS_VALID = _V1_CONFIG_NAMESERVERS.format(iface="eth1")
+V1_CONFIG_NAMESERVERS_INVALID = _V1_CONFIG_NAMESERVERS.format(iface="eth90")
+
+V2_CONFIG_NAMESERVERS = """\
+network:
+ version: 2
+ ethernets:
+ eth0:
+ match:
+ macaddress: '00:11:22:33:44:55'
+ nameservers:
+ search: [spam.local, eggs.local]
+ addresses: [8.8.8.8]
+ eth1:
+ match:
+ macaddress: '66:77:88:99:00:11'
+ set-name: "ens92"
+ nameservers:
+ search: [foo.local, bar.local]
+ addresses: [4.4.4.4]
+"""
+
+
+class TestNetworkStateParseConfig(CiTestCase):
+ def setUp(self):
+ super(TestNetworkStateParseConfig, self).setUp()
+ nsi_path = netstate_path + ".NetworkStateInterpreter"
+ self.add_patch(nsi_path, "m_nsi")
+
+    def test_missing_version_raises_runtime_error(self):
+ ncfg = {}
+ with self.assertRaises(RuntimeError):
+ network_state.parse_net_config_data(ncfg)
+
+    def test_unknown_versions_raise_runtime_error(self):
+ ncfg = {"version": 13.2}
+ with self.assertRaises(RuntimeError):
+ network_state.parse_net_config_data(ncfg)
+
+ def test_version_2_passes_self_as_config(self):
+ ncfg = {"version": 2, "otherconfig": {}, "somemore": [1, 2, 3]}
+ network_state.parse_net_config_data(ncfg)
+ self.assertEqual(
+ [mock.call(version=2, config=ncfg)], self.m_nsi.call_args_list
+ )
+
+ def test_valid_config_gets_network_state(self):
+ ncfg = {"version": 2, "otherconfig": {}, "somemore": [1, 2, 3]}
+ result = network_state.parse_net_config_data(ncfg)
+ self.assertNotEqual(None, result)
+
+ def test_empty_v1_config_gets_network_state(self):
+ ncfg = {"version": 1, "config": []}
+ result = network_state.parse_net_config_data(ncfg)
+ self.assertNotEqual(None, result)
+
+ def test_empty_v2_config_gets_network_state(self):
+ ncfg = {"version": 2}
+ result = network_state.parse_net_config_data(ncfg)
+ self.assertNotEqual(None, result)
+
+
+class TestNetworkStateParseConfigV2(CiTestCase):
+ def test_version_2_ignores_renderer_key(self):
+ ncfg = {"version": 2, "renderer": "networkd", "ethernets": {}}
+ nsi = network_state.NetworkStateInterpreter(
+ version=ncfg["version"], config=ncfg
+ )
+ nsi.parse_config(skip_broken=False)
+ self.assertEqual(ncfg, nsi.as_dict()["config"])
+
+
+class TestNetworkStateParseNameservers:
+ def _parse_network_state_from_config(self, config):
+ yaml = safeyaml.load(config)
+ return network_state.parse_net_config_data(yaml["network"])
+
+ def test_v1_nameservers_valid(self):
+ config = self._parse_network_state_from_config(
+ V1_CONFIG_NAMESERVERS_VALID
+ )
+
+ # If an interface was specified, DNS shouldn't be in the global list
+ assert ["192.168.1.0", "4.4.4.4"] == sorted(config.dns_nameservers)
+ assert ["eggs.local"] == config.dns_searchdomains
+
+ # If an interface was specified, DNS should be part of the interface
+ for iface in config.iter_interfaces():
+ if iface["name"] == "eth1":
+ assert iface["dns"]["addresses"] == ["192.168.1.1", "8.8.8.8"]
+ assert iface["dns"]["search"] == ["spam.local"]
+ else:
+ assert "dns" not in iface
+
+ def test_v1_nameservers_invalid(self):
+ with pytest.raises(ValueError):
+ self._parse_network_state_from_config(
+ V1_CONFIG_NAMESERVERS_INVALID
+ )
+
+ def test_v2_nameservers(self):
+ config = self._parse_network_state_from_config(V2_CONFIG_NAMESERVERS)
+
+ # Ensure DNS defined on interface exists on interface
+ for iface in config.iter_interfaces():
+ if iface["name"] == "eth0":
+ assert iface["dns"] == {
+ "nameservers": ["8.8.8.8"],
+ "search": ["spam.local", "eggs.local"],
+ }
+ else:
+ assert iface["dns"] == {
+ "nameservers": ["4.4.4.4"],
+ "search": ["foo.local", "bar.local"],
+ }
+
+        # Ensure DNS defined on an interface also exists globally (since
+        # there are no global DNS definitions in v2)
+ assert ["4.4.4.4", "8.8.8.8"] == sorted(config.dns_nameservers)
+ assert [
+ "bar.local",
+ "eggs.local",
+ "foo.local",
+ "spam.local",
+ ] == sorted(config.dns_searchdomains)
+
+
+class TestNetworkStateHelperFunctions(CiTestCase):
+ def test_mask_to_net_prefix_ipv4(self):
+ netmask_value = "255.255.255.0"
+ expected = 24
+ prefix_value = network_state.ipv4_mask_to_net_prefix(netmask_value)
+ assert prefix_value == expected
+
+ def test_mask_to_net_prefix_all_bits_ipv4(self):
+ netmask_value = "255.255.255.255"
+ expected = 32
+ prefix_value = network_state.ipv4_mask_to_net_prefix(netmask_value)
+ assert prefix_value == expected
+
+    def test_mask_to_net_prefix_too_many_bits_ipv4(self):
+ netmask_value = "33"
+ self.assertRaises(
+ ValueError, network_state.ipv4_mask_to_net_prefix, netmask_value
+ )
+
+ def test_mask_to_net_prefix_all_bits_ipv6(self):
+ netmask_value = "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"
+ expected = 128
+ prefix_value = network_state.ipv6_mask_to_net_prefix(netmask_value)
+ assert prefix_value == expected
+
+ def test_mask_to_net_prefix_ipv6(self):
+ netmask_value = "ffff:ffff:ffff:ffff::"
+ expected = 64
+ prefix_value = network_state.ipv6_mask_to_net_prefix(netmask_value)
+ assert prefix_value == expected
+
+ def test_mask_to_net_prefix_raises_value_error(self):
+ netmask_value = "ff:ff:ff:ff::"
+ self.assertRaises(
+ ValueError, network_state.ipv6_mask_to_net_prefix, netmask_value
+ )
+
+    def test_mask_to_net_prefix_too_many_bits_ipv6(self):
+ netmask_value = "129"
+ self.assertRaises(
+ ValueError, network_state.ipv6_mask_to_net_prefix, netmask_value
+ )
+
+ def test_mask_to_net_prefix_ipv4_object(self):
+ netmask_value = ipaddress.IPv4Address("255.255.255.255")
+ expected = 32
+ prefix_value = network_state.ipv4_mask_to_net_prefix(netmask_value)
+ assert prefix_value == expected
+
+ def test_mask_to_net_prefix_ipv6_object(self):
+ netmask_value = ipaddress.IPv6Address("ffff:ffff:ffff::")
+ expected = 48
+ prefix_value = network_state.ipv6_mask_to_net_prefix(netmask_value)
+ assert prefix_value == expected
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/net/test_networkd.py b/tests/unittests/net/test_networkd.py
new file mode 100644
index 00000000..ec1d04e9
--- /dev/null
+++ b/tests/unittests/net/test_networkd.py
@@ -0,0 +1,64 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import safeyaml
+from cloudinit.net import network_state, networkd
+
+V2_CONFIG_SET_NAME = """\
+network:
+ version: 2
+ ethernets:
+ eth0:
+ match:
+ macaddress: '00:11:22:33:44:55'
+ nameservers:
+ search: [spam.local, eggs.local]
+ addresses: [8.8.8.8]
+ eth1:
+ match:
+ macaddress: '66:77:88:99:00:11'
+ set-name: "ens92"
+ nameservers:
+ search: [foo.local, bar.local]
+ addresses: [4.4.4.4]
+"""
+
+V2_CONFIG_SET_NAME_RENDERED_ETH0 = """[Match]
+MACAddress=00:11:22:33:44:55
+Name=eth0
+
+[Network]
+DHCP=no
+DNS=8.8.8.8
+Domains=spam.local eggs.local
+
+"""
+
+V2_CONFIG_SET_NAME_RENDERED_ETH1 = """[Match]
+MACAddress=66:77:88:99:00:11
+Name=ens92
+
+[Network]
+DHCP=no
+DNS=4.4.4.4
+Domains=foo.local bar.local
+
+"""
+
+
+class TestNetworkdRenderState:
+ def _parse_network_state_from_config(self, config):
+ yaml = safeyaml.load(config)
+ return network_state.parse_net_config_data(yaml["network"])
+
+ def test_networkd_render_with_set_name(self):
+ ns = self._parse_network_state_from_config(V2_CONFIG_SET_NAME)
+ renderer = networkd.Renderer()
+ rendered_content = renderer._render_content(ns)
+
+ assert "eth0" in rendered_content
+ assert rendered_content["eth0"] == V2_CONFIG_SET_NAME_RENDERED_ETH0
+ assert "ens92" in rendered_content
+ assert rendered_content["ens92"] == V2_CONFIG_SET_NAME_RENDERED_ETH1
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/__init__.py b/tests/unittests/runs/__init__.py
index e69de29b..e69de29b 100644
--- a/tests/unittests/test_handler/__init__.py
+++ b/tests/unittests/runs/__init__.py
diff --git a/tests/unittests/runs/test_merge_run.py b/tests/unittests/runs/test_merge_run.py
new file mode 100644
index 00000000..1b1b5595
--- /dev/null
+++ b/tests/unittests/runs/test_merge_run.py
@@ -0,0 +1,61 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import os
+import shutil
+import tempfile
+
+from cloudinit import safeyaml, stages, util
+from cloudinit.settings import PER_INSTANCE
+from tests.unittests import helpers
+
+
+class TestMergeRun(helpers.FilesystemMockingTestCase):
+ def _patchIn(self, root):
+ self.patchOS(root)
+ self.patchUtils(root)
+
+ def test_none_ds(self):
+ new_root = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, new_root)
+ self.replicateTestRoot("simple_ubuntu", new_root)
+ cfg = {
+ "datasource_list": ["None"],
+ "cloud_init_modules": ["write-files"],
+ "system_info": {"paths": {"run_dir": new_root}},
+ }
+ ud = helpers.readResource("user_data.1.txt")
+ cloud_cfg = safeyaml.dumps(cfg)
+ util.ensure_dir(os.path.join(new_root, "etc", "cloud"))
+ util.write_file(
+ os.path.join(new_root, "etc", "cloud", "cloud.cfg"), cloud_cfg
+ )
+ self._patchIn(new_root)
+
+        # Now start verifying what's created
+ initer = stages.Init()
+ initer.read_cfg()
+ initer.initialize()
+ initer.fetch()
+ initer.datasource.userdata_raw = ud
+ initer.instancify()
+ initer.update()
+ initer.cloudify().run(
+ "consume_data",
+ initer.consume_data,
+ args=[PER_INSTANCE],
+ freq=PER_INSTANCE,
+ )
+ mirrors = initer.distro.get_option("package_mirrors")
+ self.assertEqual(1, len(mirrors))
+ mirror = mirrors[0]
+ self.assertEqual(mirror["arches"], ["i386", "amd64", "blah"])
+ mods = stages.Modules(initer)
+ (which_ran, failures) = mods.run_section("cloud_init_modules")
+ self.assertTrue(len(failures) == 0)
+ self.assertTrue(os.path.exists("/etc/blah.ini"))
+ self.assertIn("write-files", which_ran)
+ contents = util.load_file("/etc/blah.ini")
+ self.assertEqual(contents, "blah")
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_runs/test_simple_run.py b/tests/unittests/runs/test_simple_run.py
index cb3aae60..38cf9494 100644
--- a/tests/unittests/test_runs/test_simple_run.py
+++ b/tests/unittests/runs/test_simple_run.py
@@ -3,12 +3,9 @@
import copy
import os
-
+from cloudinit import safeyaml, stages, util
from cloudinit.settings import PER_INSTANCE
-from cloudinit import safeyaml
-from cloudinit import stages
-from cloudinit.tests import helpers
-from cloudinit import util
+from tests.unittests import helpers
class TestSimpleRun(helpers.FilesystemMockingTestCase):
@@ -18,27 +15,28 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase):
def setUp(self):
super(TestSimpleRun, self).setUp()
self.new_root = self.tmp_dir()
- self.replicateTestRoot('simple_ubuntu', self.new_root)
+ self.replicateTestRoot("simple_ubuntu", self.new_root)
# Seed cloud.cfg file for our tests
self.cfg = {
- 'datasource_list': ['None'],
- 'runcmd': ['ls /etc'], # test ALL_DISTROS
- 'spacewalk': {}, # test non-ubuntu distros module definition
- 'system_info': {'paths': {'run_dir': self.new_root}},
- 'write_files': [
+ "datasource_list": ["None"],
+ "runcmd": ["ls /etc"], # test ALL_DISTROS
+ "spacewalk": {}, # test non-ubuntu distros module definition
+ "system_info": {"paths": {"run_dir": self.new_root}},
+ "write_files": [
{
- 'path': '/etc/blah.ini',
- 'content': 'blah',
- 'permissions': 0o755,
+ "path": "/etc/blah.ini",
+ "content": "blah",
+ "permissions": 0o755,
},
],
- 'cloud_init_modules': ['write-files', 'spacewalk', 'runcmd'],
+ "cloud_init_modules": ["write-files", "spacewalk", "runcmd"],
}
cloud_cfg = safeyaml.dumps(self.cfg)
- util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud'))
- util.write_file(os.path.join(self.new_root, 'etc',
- 'cloud', 'cloud.cfg'), cloud_cfg)
+ util.ensure_dir(os.path.join(self.new_root, "etc", "cloud"))
+ util.write_file(
+ os.path.join(self.new_root, "etc", "cloud", "cloud.cfg"), cloud_cfg
+ )
self.patchOS(self.new_root)
self.patchUtils(self.new_root)
@@ -49,12 +47,12 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase):
initer.read_cfg()
initer.initialize()
self.assertTrue(os.path.exists("/var/lib/cloud"))
- for d in ['scripts', 'seed', 'instances', 'handlers', 'sem', 'data']:
+ for d in ["scripts", "seed", "instances", "handlers", "sem", "data"]:
self.assertTrue(os.path.isdir(os.path.join("/var/lib/cloud", d)))
initer.fetch()
iid = initer.instancify()
- self.assertEqual(iid, 'iid-datasource-none')
+ self.assertEqual(iid, "iid-datasource-none")
initer.update()
self.assertTrue(os.path.islink("var/lib/cloud/instance"))
@@ -66,20 +64,25 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase):
initer.fetch()
initer.instancify()
initer.update()
- initer.cloudify().run('consume_data', initer.consume_data,
- args=[PER_INSTANCE], freq=PER_INSTANCE)
+ initer.cloudify().run(
+ "consume_data",
+ initer.consume_data,
+ args=[PER_INSTANCE],
+ freq=PER_INSTANCE,
+ )
mods = stages.Modules(initer)
- (which_ran, failures) = mods.run_section('cloud_init_modules')
+ (which_ran, failures) = mods.run_section("cloud_init_modules")
self.assertTrue(len(failures) == 0)
- self.assertTrue(os.path.exists('/etc/blah.ini'))
- self.assertIn('write-files', which_ran)
- contents = util.load_file('/etc/blah.ini')
- self.assertEqual(contents, 'blah')
+ self.assertTrue(os.path.exists("/etc/blah.ini"))
+ self.assertIn("write-files", which_ran)
+ contents = util.load_file("/etc/blah.ini")
+ self.assertEqual(contents, "blah")
self.assertNotIn(
"Skipping modules ['write-files'] because they are not verified on"
" distro 'ubuntu'",
- self.logs.getvalue())
+ self.logs.getvalue(),
+ )
def test_none_ds_skips_modules_which_define_unmatched_distros(self):
"""Skip modules which define distros which don't match the current."""
@@ -89,17 +92,22 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase):
initer.fetch()
initer.instancify()
initer.update()
- initer.cloudify().run('consume_data', initer.consume_data,
- args=[PER_INSTANCE], freq=PER_INSTANCE)
+ initer.cloudify().run(
+ "consume_data",
+ initer.consume_data,
+ args=[PER_INSTANCE],
+ freq=PER_INSTANCE,
+ )
mods = stages.Modules(initer)
- (which_ran, failures) = mods.run_section('cloud_init_modules')
+ (which_ran, failures) = mods.run_section("cloud_init_modules")
self.assertTrue(len(failures) == 0)
self.assertIn(
"Skipping modules 'spacewalk' because they are not verified on"
" distro 'ubuntu'",
- self.logs.getvalue())
- self.assertNotIn('spacewalk', which_ran)
+ self.logs.getvalue(),
+ )
+ self.assertNotIn("spacewalk", which_ran)
def test_none_ds_runs_modules_which_distros_all(self):
"""Skip modules which define distros attribute as supporting 'all'.
@@ -113,28 +121,34 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase):
initer.fetch()
initer.instancify()
initer.update()
- initer.cloudify().run('consume_data', initer.consume_data,
- args=[PER_INSTANCE], freq=PER_INSTANCE)
+ initer.cloudify().run(
+ "consume_data",
+ initer.consume_data,
+ args=[PER_INSTANCE],
+ freq=PER_INSTANCE,
+ )
mods = stages.Modules(initer)
- (which_ran, failures) = mods.run_section('cloud_init_modules')
+ (which_ran, failures) = mods.run_section("cloud_init_modules")
self.assertTrue(len(failures) == 0)
- self.assertIn('runcmd', which_ran)
+ self.assertIn("runcmd", which_ran)
self.assertNotIn(
"Skipping modules 'runcmd' because they are not verified on"
" distro 'ubuntu'",
- self.logs.getvalue())
+ self.logs.getvalue(),
+ )
def test_none_ds_forces_run_via_unverified_modules(self):
"""run_section forced skipped modules by using unverified_modules."""
# re-write cloud.cfg with unverified_modules override
cfg = copy.deepcopy(self.cfg)
- cfg['unverified_modules'] = ['spacewalk'] # Would have skipped
+ cfg["unverified_modules"] = ["spacewalk"] # Would have skipped
cloud_cfg = safeyaml.dumps(cfg)
- util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud'))
- util.write_file(os.path.join(self.new_root, 'etc',
- 'cloud', 'cloud.cfg'), cloud_cfg)
+ util.ensure_dir(os.path.join(self.new_root, "etc", "cloud"))
+ util.write_file(
+ os.path.join(self.new_root, "etc", "cloud", "cloud.cfg"), cloud_cfg
+ )
initer = stages.Init()
initer.read_cfg()
@@ -142,16 +156,20 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase):
initer.fetch()
initer.instancify()
initer.update()
- initer.cloudify().run('consume_data', initer.consume_data,
- args=[PER_INSTANCE], freq=PER_INSTANCE)
+ initer.cloudify().run(
+ "consume_data",
+ initer.consume_data,
+ args=[PER_INSTANCE],
+ freq=PER_INSTANCE,
+ )
mods = stages.Modules(initer)
- (which_ran, failures) = mods.run_section('cloud_init_modules')
+ (which_ran, failures) = mods.run_section("cloud_init_modules")
self.assertTrue(len(failures) == 0)
- self.assertIn('spacewalk', which_ran)
+ self.assertIn("spacewalk", which_ran)
self.assertIn(
- "running unverified_modules: 'spacewalk'",
- self.logs.getvalue())
+ "running unverified_modules: 'spacewalk'", self.logs.getvalue()
+ )
def test_none_ds_run_with_no_config_modules(self):
"""run_section will report no modules run when none are configured."""
@@ -159,11 +177,12 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase):
# re-write cloud.cfg with unverified_modules override
cfg = copy.deepcopy(self.cfg)
# Represent empty configuration in /etc/cloud/cloud.cfg
- cfg['cloud_init_modules'] = None
+ cfg["cloud_init_modules"] = None
cloud_cfg = safeyaml.dumps(cfg)
- util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud'))
- util.write_file(os.path.join(self.new_root, 'etc',
- 'cloud', 'cloud.cfg'), cloud_cfg)
+ util.ensure_dir(os.path.join(self.new_root, "etc", "cloud"))
+ util.write_file(
+ os.path.join(self.new_root, "etc", "cloud", "cloud.cfg"), cloud_cfg
+ )
initer = stages.Init()
initer.read_cfg()
@@ -171,12 +190,17 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase):
initer.fetch()
initer.instancify()
initer.update()
- initer.cloudify().run('consume_data', initer.consume_data,
- args=[PER_INSTANCE], freq=PER_INSTANCE)
+ initer.cloudify().run(
+ "consume_data",
+ initer.consume_data,
+ args=[PER_INSTANCE],
+ freq=PER_INSTANCE,
+ )
mods = stages.Modules(initer)
- (which_ran, failures) = mods.run_section('cloud_init_modules')
+ (which_ran, failures) = mods.run_section("cloud_init_modules")
self.assertTrue(len(failures) == 0)
self.assertEqual([], which_ran)
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_runs/__init__.py b/tests/unittests/sources/__init__.py
index e69de29b..e69de29b 100644
--- a/tests/unittests/test_runs/__init__.py
+++ b/tests/unittests/sources/__init__.py
diff --git a/tests/unittests/sources/helpers/test_netlink.py b/tests/unittests/sources/helpers/test_netlink.py
new file mode 100644
index 00000000..5eabf104
--- /dev/null
+++ b/tests/unittests/sources/helpers/test_netlink.py
@@ -0,0 +1,573 @@
+# Author: Tamilmani Manoharan <tamanoha@microsoft.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import codecs
+import socket
+import struct
+
+from cloudinit.sources.helpers.netlink import (
+ MAX_SIZE,
+ OPER_DORMANT,
+ OPER_DOWN,
+ OPER_LOWERLAYERDOWN,
+ OPER_NOTPRESENT,
+ OPER_TESTING,
+ OPER_UNKNOWN,
+ OPER_UP,
+ RTATTR_START_OFFSET,
+ RTM_DELLINK,
+ RTM_GETLINK,
+ RTM_NEWLINK,
+ RTM_SETLINK,
+ NetlinkCreateSocketError,
+ create_bound_netlink_socket,
+ read_netlink_socket,
+ read_rta_oper_state,
+ unpack_rta_attr,
+ wait_for_media_disconnect_connect,
+ wait_for_nic_attach_event,
+ wait_for_nic_detach_event,
+)
+from tests.unittests.helpers import CiTestCase, mock
+
+
+def int_to_bytes(i):
+ """convert integer to binary: eg: 1 to \x01"""
+ hex_value = "{0:x}".format(i)
+ hex_value = "0" * (len(hex_value) % 2) + hex_value
+ return codecs.decode(hex_value, "hex_codec")
+
+
+class TestCreateBoundNetlinkSocket(CiTestCase):
+ @mock.patch("cloudinit.sources.helpers.netlink.socket.socket")
+ def test_socket_error_on_create(self, m_socket):
+ """create_bound_netlink_socket catches socket creation exception"""
+
+ """NetlinkCreateSocketError is raised when socket creation errors."""
+ m_socket.side_effect = socket.error("Fake socket failure")
+ with self.assertRaises(NetlinkCreateSocketError) as ctx_mgr:
+ create_bound_netlink_socket()
+ self.assertEqual(
+ "Exception during netlink socket create: Fake socket failure",
+ str(ctx_mgr.exception),
+ )
+
+
+class TestReadNetlinkSocket(CiTestCase):
+ @mock.patch("cloudinit.sources.helpers.netlink.socket.socket")
+ @mock.patch("cloudinit.sources.helpers.netlink.select.select")
+ def test_read_netlink_socket(self, m_select, m_socket):
+ """read_netlink_socket able to receive data"""
+ data = "netlinktest"
+ m_select.return_value = [m_socket], None, None
+ m_socket.recv.return_value = data
+ recv_data = read_netlink_socket(m_socket, 2)
+ m_select.assert_called_with([m_socket], [], [], 2)
+ m_socket.recv.assert_called_with(MAX_SIZE)
+ self.assertIsNotNone(recv_data)
+ self.assertEqual(recv_data, data)
+
+ @mock.patch("cloudinit.sources.helpers.netlink.socket.socket")
+ @mock.patch("cloudinit.sources.helpers.netlink.select.select")
+ def test_netlink_read_timeout(self, m_select, m_socket):
+ """read_netlink_socket should timeout if nothing to read"""
+ m_select.return_value = [], None, None
+ data = read_netlink_socket(m_socket, 1)
+ m_select.assert_called_with([m_socket], [], [], 1)
+ self.assertEqual(m_socket.recv.call_count, 0)
+ self.assertIsNone(data)
+
+ def test_read_invalid_socket(self):
+ """read_netlink_socket raises assert error if socket is invalid"""
+ socket = None
+ with self.assertRaises(AssertionError) as context:
+ read_netlink_socket(socket, 1)
+ self.assertTrue("netlink socket is none" in str(context.exception))
+
+
+class TestParseNetlinkMessage(CiTestCase):
+ def test_read_rta_oper_state(self):
+ """read_rta_oper_state could parse netlink message and extract data"""
+ ifname = "eth0"
+ bytes = ifname.encode("utf-8")
+ buf = bytearray(48)
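+        # pack two rtattrs after the header offset: the interface name
+        # (len 8, type 3 == IFLA_IFNAME) followed by the oper state byte
+        # (len 5, type 16 == IFLA_OPERSTATE)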
+ struct.pack_into(
+ "HH4sHHc",
+ buf,
+ RTATTR_START_OFFSET,
+ 8,
+ 3,
+ bytes,
+ 5,
+ 16,
+ int_to_bytes(OPER_DOWN),
+ )
+ interface_state = read_rta_oper_state(buf)
+ self.assertEqual(interface_state.ifname, ifname)
+ self.assertEqual(interface_state.operstate, OPER_DOWN)
+
+ def test_read_none_data(self):
+ """read_rta_oper_state raises assert error if data is none"""
+ data = None
+ with self.assertRaises(AssertionError) as context:
+ read_rta_oper_state(data)
+ self.assertEqual("data is none", str(context.exception))
+
+ def test_read_invalid_rta_operstate_none(self):
+ """read_rta_oper_state returns none if operstate is none"""
+ ifname = "eth0"
+ buf = bytearray(40)
+ bytes = ifname.encode("utf-8")
+ struct.pack_into("HH4s", buf, RTATTR_START_OFFSET, 8, 3, bytes)
+ interface_state = read_rta_oper_state(buf)
+ self.assertIsNone(interface_state)
+
+ def test_read_invalid_rta_ifname_none(self):
+ """read_rta_oper_state returns none if ifname is none"""
+ buf = bytearray(40)
+ struct.pack_into(
+ "HHc", buf, RTATTR_START_OFFSET, 5, 16, int_to_bytes(OPER_DOWN)
+ )
+ interface_state = read_rta_oper_state(buf)
+ self.assertIsNone(interface_state)
+
+ def test_read_invalid_data_len(self):
+ """raise assert error if data size is smaller than required size"""
+ buf = bytearray(32)
+ with self.assertRaises(AssertionError) as context:
+ read_rta_oper_state(buf)
+ self.assertTrue(
+ "length of data is smaller than RTATTR_START_OFFSET"
+ in str(context.exception)
+ )
+
+ def test_unpack_rta_attr_none_data(self):
+ """unpack_rta_attr raises assert error if data is none"""
+ data = None
+ with self.assertRaises(AssertionError) as context:
+ unpack_rta_attr(data, RTATTR_START_OFFSET)
+ self.assertTrue("data is none" in str(context.exception))
+
+ def test_unpack_rta_attr_invalid_offset(self):
+ """unpack_rta_attr raises assert error if offset is invalid"""
+ data = bytearray(48)
+ with self.assertRaises(AssertionError) as context:
+ unpack_rta_attr(data, "offset")
+ self.assertTrue("offset is not integer" in str(context.exception))
+ with self.assertRaises(AssertionError) as context:
+ unpack_rta_attr(data, 31)
+ self.assertTrue(
+ "rta offset is less than expected length" in str(context.exception)
+ )
+
+
+@mock.patch("cloudinit.sources.helpers.netlink.socket.socket")
+@mock.patch("cloudinit.sources.helpers.netlink.read_netlink_socket")
+class TestNicAttachDetach(CiTestCase):
+ with_logs = True
+
+ def _media_switch_data(self, ifname, msg_type, operstate):
+ """construct netlink data with specified fields"""
+ if ifname and operstate is not None:
+ data = bytearray(48)
+ bytes = ifname.encode("utf-8")
+ struct.pack_into(
+ "HH4sHHc",
+ data,
+ RTATTR_START_OFFSET,
+ 8,
+ 3,
+ bytes,
+ 5,
+ 16,
+ int_to_bytes(operstate),
+ )
+ elif ifname:
+ data = bytearray(40)
+ bytes = ifname.encode("utf-8")
+ struct.pack_into("HH4s", data, RTATTR_START_OFFSET, 8, 3, bytes)
+ elif operstate:
+ data = bytearray(40)
+ struct.pack_into(
+ "HHc",
+ data,
+ RTATTR_START_OFFSET,
+ 5,
+ 16,
+ int_to_bytes(operstate),
+ )
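+        # write the 16 byte netlink message header (length, type, flags,
+        # sequence, pid) at the start of the buffer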
+ struct.pack_into("=LHHLL", data, 0, len(data), msg_type, 0, 0, 0)
+ return data
+
+ def test_nic_attached_oper_down(self, m_read_netlink_socket, m_socket):
+ """Test for a new nic attached"""
+ ifname = "eth0"
+ data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
+ m_read_netlink_socket.side_effect = [data_op_down]
+ ifread = wait_for_nic_attach_event(m_socket, [])
+ self.assertEqual(m_read_netlink_socket.call_count, 1)
+ self.assertEqual(ifname, ifread)
+
+ def test_nic_attached_oper_up(self, m_read_netlink_socket, m_socket):
+ """Test for a new nic attached"""
+ ifname = "eth0"
+ data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
+ m_read_netlink_socket.side_effect = [data_op_up]
+ ifread = wait_for_nic_attach_event(m_socket, [])
+ self.assertEqual(m_read_netlink_socket.call_count, 1)
+ self.assertEqual(ifname, ifread)
+
+ def test_nic_attach_ignore_existing(self, m_read_netlink_socket, m_socket):
+ """Test that we read only the interfaces we are interested in."""
+ data_eth0 = self._media_switch_data("eth0", RTM_NEWLINK, OPER_DOWN)
+ data_eth1 = self._media_switch_data("eth1", RTM_NEWLINK, OPER_DOWN)
+ m_read_netlink_socket.side_effect = [data_eth0, data_eth1]
+ ifread = wait_for_nic_attach_event(m_socket, ["eth0"])
+ self.assertEqual(m_read_netlink_socket.call_count, 2)
+ self.assertEqual("eth1", ifread)
+
+ def test_nic_attach_read_first(self, m_read_netlink_socket, m_socket):
+ """Test that we read only the interfaces we are interested in."""
+ data_eth0 = self._media_switch_data("eth0", RTM_NEWLINK, OPER_DOWN)
+ data_eth1 = self._media_switch_data("eth1", RTM_NEWLINK, OPER_DOWN)
+ m_read_netlink_socket.side_effect = [data_eth0, data_eth1]
+ ifread = wait_for_nic_attach_event(m_socket, ["eth1"])
+ self.assertEqual(m_read_netlink_socket.call_count, 1)
+ self.assertEqual("eth0", ifread)
+
+ def test_nic_detached(self, m_read_netlink_socket, m_socket):
+ """Test for an existing nic detached"""
+ ifname = "eth0"
+ data_op_down = self._media_switch_data(ifname, RTM_DELLINK, OPER_DOWN)
+ m_read_netlink_socket.side_effect = [data_op_down]
+ ifread = wait_for_nic_detach_event(m_socket)
+ self.assertEqual(m_read_netlink_socket.call_count, 1)
+ self.assertEqual(ifname, ifread)
+
+
+@mock.patch("cloudinit.sources.helpers.netlink.socket.socket")
+@mock.patch("cloudinit.sources.helpers.netlink.read_netlink_socket")
+class TestWaitForMediaDisconnectConnect(CiTestCase):
+ with_logs = True
+
+ def _media_switch_data(self, ifname, msg_type, operstate):
+ """construct netlink data with specified fields"""
+ if ifname and operstate is not None:
+ data = bytearray(48)
+ bytes = ifname.encode("utf-8")
+ struct.pack_into(
+ "HH4sHHc",
+ data,
+ RTATTR_START_OFFSET,
+ 8,
+ 3,
+ bytes,
+ 5,
+ 16,
+ int_to_bytes(operstate),
+ )
+ elif ifname:
+ data = bytearray(40)
+ bytes = ifname.encode("utf-8")
+ struct.pack_into("HH4s", data, RTATTR_START_OFFSET, 8, 3, bytes)
+ elif operstate:
+ data = bytearray(40)
+ struct.pack_into(
+ "HHc",
+ data,
+ RTATTR_START_OFFSET,
+ 5,
+ 16,
+ int_to_bytes(operstate),
+ )
+ struct.pack_into("=LHHLL", data, 0, len(data), msg_type, 0, 0, 0)
+ return data
+
+ def test_media_down_up_scenario(self, m_read_netlink_socket, m_socket):
+ """Test for media down up sequence for required interface name"""
+ ifname = "eth0"
+ # construct data for Oper State down
+ data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
+ # construct data for Oper State up
+ data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
+ m_read_netlink_socket.side_effect = [data_op_down, data_op_up]
+ wait_for_media_disconnect_connect(m_socket, ifname)
+ self.assertEqual(m_read_netlink_socket.call_count, 2)
+
+ def test_wait_for_media_switch_diff_interface(
+ self, m_read_netlink_socket, m_socket
+ ):
+ """wait_for_media_disconnect_connect ignores unexpected interfaces.
+
+        The first two messages are for other interfaces and the last two are
+        for the expected interface, so the function exits only after it has
+        received the last two messages; the call count for
+        m_read_netlink_socket therefore has to be 4.
+ """
+ other_ifname = "eth1"
+ expected_ifname = "eth0"
+ data_op_down_eth1 = self._media_switch_data(
+ other_ifname, RTM_NEWLINK, OPER_DOWN
+ )
+ data_op_up_eth1 = self._media_switch_data(
+ other_ifname, RTM_NEWLINK, OPER_UP
+ )
+ data_op_down_eth0 = self._media_switch_data(
+ expected_ifname, RTM_NEWLINK, OPER_DOWN
+ )
+ data_op_up_eth0 = self._media_switch_data(
+ expected_ifname, RTM_NEWLINK, OPER_UP
+ )
+ m_read_netlink_socket.side_effect = [
+ data_op_down_eth1,
+ data_op_up_eth1,
+ data_op_down_eth0,
+ data_op_up_eth0,
+ ]
+ wait_for_media_disconnect_connect(m_socket, expected_ifname)
+ self.assertIn(
+ "Ignored netlink event on interface %s" % other_ifname,
+ self.logs.getvalue(),
+ )
+ self.assertEqual(m_read_netlink_socket.call_count, 4)
+
+ def test_invalid_msgtype_getlink(self, m_read_netlink_socket, m_socket):
+ """wait_for_media_disconnect_connect ignores GETLINK events.
+
+        The first two messages are oper down and up events of RTM_GETLINK
+        type, which the netlink module ignores. The last two messages are
+        RTM_NEWLINK events with oper state down and up. The call count for
+        m_read_netlink_socket therefore has to be 4, including the two
+        ignored RTM_GETLINK messages.
+ """
+ ifname = "eth0"
+ data_getlink_down = self._media_switch_data(
+ ifname, RTM_GETLINK, OPER_DOWN
+ )
+ data_getlink_up = self._media_switch_data(ifname, RTM_GETLINK, OPER_UP)
+ data_newlink_down = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_DOWN
+ )
+ data_newlink_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
+ m_read_netlink_socket.side_effect = [
+ data_getlink_down,
+ data_getlink_up,
+ data_newlink_down,
+ data_newlink_up,
+ ]
+ wait_for_media_disconnect_connect(m_socket, ifname)
+ self.assertEqual(m_read_netlink_socket.call_count, 4)
+
+ def test_invalid_msgtype_setlink(self, m_read_netlink_socket, m_socket):
+ """wait_for_media_disconnect_connect ignores SETLINK events.
+
+        The first two messages are oper down and up events of RTM_SETLINK
+        type, which the function ignores. The 3rd and 4th messages are
+        RTM_NEWLINK down and up events. The function should exit after the
+        4th message, since it has then seen the down->up scenario, so the
+        call count for m_read_netlink_socket has to be 4; the trailing two
+        RTM_NEWLINK messages are never read.
+ """
+ ifname = "eth0"
+ data_setlink_down = self._media_switch_data(
+ ifname, RTM_SETLINK, OPER_DOWN
+ )
+ data_setlink_up = self._media_switch_data(ifname, RTM_SETLINK, OPER_UP)
+ data_newlink_down = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_DOWN
+ )
+ data_newlink_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
+ m_read_netlink_socket.side_effect = [
+ data_setlink_down,
+ data_setlink_up,
+ data_newlink_down,
+ data_newlink_up,
+ data_newlink_down,
+ data_newlink_up,
+ ]
+ wait_for_media_disconnect_connect(m_socket, ifname)
+ self.assertEqual(m_read_netlink_socket.call_count, 4)
+
+ def test_netlink_invalid_switch_scenario(
+ self, m_read_netlink_socket, m_socket
+ ):
+ """returns only if it receives UP event after a DOWN event"""
+ ifname = "eth0"
+ data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
+ data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
+ data_op_dormant = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_DORMANT
+ )
+ data_op_notpresent = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_NOTPRESENT
+ )
+ data_op_lowerdown = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_LOWERLAYERDOWN
+ )
+ data_op_testing = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_TESTING
+ )
+ data_op_unknown = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_UNKNOWN
+ )
+ m_read_netlink_socket.side_effect = [
+ data_op_up,
+ data_op_up,
+ data_op_dormant,
+ data_op_up,
+ data_op_notpresent,
+ data_op_up,
+ data_op_lowerdown,
+ data_op_up,
+ data_op_testing,
+ data_op_up,
+ data_op_unknown,
+ data_op_up,
+ data_op_down,
+ data_op_up,
+ ]
+ wait_for_media_disconnect_connect(m_socket, ifname)
+ self.assertEqual(m_read_netlink_socket.call_count, 14)
+
+ def test_netlink_valid_inbetween_transitions(
+ self, m_read_netlink_socket, m_socket
+ ):
+ """wait_for_media_disconnect_connect handles in between transitions"""
+ ifname = "eth0"
+ data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
+ data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
+ data_op_dormant = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_DORMANT
+ )
+ data_op_unknown = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_UNKNOWN
+ )
+ m_read_netlink_socket.side_effect = [
+ data_op_down,
+ data_op_dormant,
+ data_op_unknown,
+ data_op_up,
+ ]
+ wait_for_media_disconnect_connect(m_socket, ifname)
+ self.assertEqual(m_read_netlink_socket.call_count, 4)
+
+ def test_netlink_invalid_operstate(self, m_read_netlink_socket, m_socket):
+ """wait_for_media_disconnect_connect should handle invalid operstates.
+
+        The function should not fail or return early when it receives
+        invalid operstates; it should keep waiting for the down->up sequence.
+ """
+ ifname = "eth0"
+ data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
+ data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
+ data_op_invalid = self._media_switch_data(ifname, RTM_NEWLINK, 7)
+ m_read_netlink_socket.side_effect = [
+ data_op_invalid,
+ data_op_up,
+ data_op_down,
+ data_op_invalid,
+ data_op_up,
+ ]
+ wait_for_media_disconnect_connect(m_socket, ifname)
+ self.assertEqual(m_read_netlink_socket.call_count, 5)
+
+ def test_wait_invalid_socket(self, m_read_netlink_socket, m_socket):
+ """wait_for_media_disconnect_connect handle none netlink socket."""
+ socket = None
+ ifname = "eth0"
+ with self.assertRaises(AssertionError) as context:
+ wait_for_media_disconnect_connect(socket, ifname)
+ self.assertTrue("netlink socket is none" in str(context.exception))
+
+ def test_wait_invalid_ifname(self, m_read_netlink_socket, m_socket):
+ """wait_for_media_disconnect_connect handle none interface name"""
+ ifname = None
+ with self.assertRaises(AssertionError) as context:
+ wait_for_media_disconnect_connect(m_socket, ifname)
+ self.assertTrue("interface name is none" in str(context.exception))
+ ifname = ""
+ with self.assertRaises(AssertionError) as context:
+ wait_for_media_disconnect_connect(m_socket, ifname)
+ self.assertTrue(
+ "interface name cannot be empty" in str(context.exception)
+ )
+
+ def test_wait_invalid_rta_attr(self, m_read_netlink_socket, m_socket):
+ """wait_for_media_disconnect_connect handles invalid rta data"""
+ ifname = "eth0"
+ data_invalid1 = self._media_switch_data(None, RTM_NEWLINK, OPER_DOWN)
+ data_invalid2 = self._media_switch_data(ifname, RTM_NEWLINK, None)
+ data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
+ data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
+ m_read_netlink_socket.side_effect = [
+ data_invalid1,
+ data_invalid2,
+ data_op_down,
+ data_op_up,
+ ]
+ wait_for_media_disconnect_connect(m_socket, ifname)
+ self.assertEqual(m_read_netlink_socket.call_count, 4)
+
+ def test_read_multiple_netlink_msgs(self, m_read_netlink_socket, m_socket):
+ """Read multiple messages in a single receive call."""
+ ifname = "eth0"
+ bytes = ifname.encode("utf-8")
+ data = bytearray(96)
+ struct.pack_into("=LHHLL", data, 0, 48, RTM_NEWLINK, 0, 0, 0)
+ struct.pack_into(
+ "HH4sHHc",
+ data,
+ RTATTR_START_OFFSET,
+ 8,
+ 3,
+ bytes,
+ 5,
+ 16,
+ int_to_bytes(OPER_DOWN),
+ )
+ struct.pack_into("=LHHLL", data, 48, 48, RTM_NEWLINK, 0, 0, 0)
+ struct.pack_into(
+ "HH4sHHc",
+ data,
+ 48 + RTATTR_START_OFFSET,
+ 8,
+ 3,
+ bytes,
+ 5,
+ 16,
+ int_to_bytes(OPER_UP),
+ )
+ m_read_netlink_socket.return_value = data
+ wait_for_media_disconnect_connect(m_socket, ifname)
+ self.assertEqual(m_read_netlink_socket.call_count, 1)
+
+ def test_read_partial_netlink_msgs(self, m_read_netlink_socket, m_socket):
+ """Read partial messages split across receive calls."""
+ ifname = "eth0"
+ bytes = ifname.encode("utf-8")
+ data1 = bytearray(112)
+ data2 = bytearray(32)
+ struct.pack_into("=LHHLL", data1, 0, 48, RTM_NEWLINK, 0, 0, 0)
+ struct.pack_into(
+ "HH4sHHc",
+ data1,
+ RTATTR_START_OFFSET,
+ 8,
+ 3,
+ bytes,
+ 5,
+ 16,
+ int_to_bytes(OPER_DOWN),
+ )
+ struct.pack_into("=LHHLL", data1, 48, 48, RTM_NEWLINK, 0, 0, 0)
+ struct.pack_into(
+ "HH4sHHc", data1, 80, 8, 3, bytes, 5, 16, int_to_bytes(OPER_DOWN)
+ )
+ struct.pack_into("=LHHLL", data1, 96, 48, RTM_NEWLINK, 0, 0, 0)
+ struct.pack_into(
+ "HH4sHHc", data2, 16, 8, 3, bytes, 5, 16, int_to_bytes(OPER_UP)
+ )
+ m_read_netlink_socket.side_effect = [data1, data2]
+ wait_for_media_disconnect_connect(m_socket, ifname)
+ self.assertEqual(m_read_netlink_socket.call_count, 2)
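
For readers skimming the struct.pack_into calls above, the following standalone sketch packs one fake RTM_NEWLINK message the same way. The names mirror the tests; the literal constant values (RTM_NEWLINK=16, OPER_DOWN=2, OPER_UP=6, a 16-byte attribute offset) are assumptions for illustration only, not taken from the netlink helper itself.

    import struct

    RTM_NEWLINK = 16           # assumed message-type value
    OPER_DOWN, OPER_UP = 2, 6  # assumed operstate values
    RTATTR_START_OFFSET = 16   # assumed: rtattrs start right after the header

    def fake_rtm_newlink(ifname, operstate, size=48):
        """Pack one fake message: netlink header + IFLA_IFNAME + IFLA_OPERSTATE."""
        buf = bytearray(size)
        # netlink header: total length, message type, flags, sequence, pid
        struct.pack_into("=LHHLL", buf, 0, size, RTM_NEWLINK, 0, 0, 0)
        # rtattr #1 (len=8, type=3): 4-byte interface name
        # rtattr #2 (len=5, type=16): 1-byte operstate
        struct.pack_into(
            "HH4sHHc", buf, RTATTR_START_OFFSET,
            8, 3, ifname.encode("utf-8"), 5, 16, bytes([operstate]),
        )
        return bytes(buf)

    # Two 48-byte messages back to back give the kind of 96-byte buffer that
    # test_read_multiple_netlink_msgs feeds to the mocked socket read.
    payload = fake_rtm_newlink("eth0", OPER_DOWN) + fake_rtm_newlink("eth0", OPER_UP)
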
diff --git a/tests/unittests/sources/helpers/test_openstack.py b/tests/unittests/sources/helpers/test_openstack.py
new file mode 100644
index 00000000..eb87b1ce
--- /dev/null
+++ b/tests/unittests/sources/helpers/test_openstack.py
@@ -0,0 +1,62 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+# tests/unittests/sources/helpers/test_openstack.py
+from unittest import mock
+
+from cloudinit.sources.helpers import openstack
+from tests.unittests import helpers as test_helpers
+
+
+@mock.patch(
+ "cloudinit.net.is_openvswitch_internal_interface",
+ mock.Mock(return_value=False),
+)
+class TestConvertNetJson(test_helpers.CiTestCase):
+ def test_phy_types(self):
+ """Verify the different known physical types are handled."""
+ # network_data.json example from
+ # https://docs.openstack.org/nova/latest/user/metadata.html
+ mac0 = "fa:16:3e:9c:bf:3d"
+ net_json = {
+ "links": [
+ {
+ "ethernet_mac_address": mac0,
+ "id": "tapcd9f6d46-4a",
+ "mtu": None,
+ "type": "bridge",
+ "vif_id": "cd9f6d46-4a3a-43ab-a466-994af9db96fc",
+ }
+ ],
+ "networks": [
+ {
+ "id": "network0",
+ "link": "tapcd9f6d46-4a",
+ "network_id": "99e88329-f20d-4741-9593-25bf07847b16",
+ "type": "ipv4_dhcp",
+ }
+ ],
+ "services": [{"address": "8.8.8.8", "type": "dns"}],
+ }
+ macs = {mac0: "eth0"}
+
+ expected = {
+ "version": 1,
+ "config": [
+ {
+ "mac_address": "fa:16:3e:9c:bf:3d",
+ "mtu": None,
+ "name": "eth0",
+ "subnets": [{"type": "dhcp4"}],
+ "type": "physical",
+ },
+ {"address": "8.8.8.8", "type": "nameserver"},
+ ],
+ }
+
+ for t in openstack.KNOWN_PHYSICAL_TYPES:
+ net_json["links"][0]["type"] = t
+ self.assertEqual(
+ expected,
+ openstack.convert_net_json(
+ network_json=net_json, known_macs=macs
+ ),
+ )
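
As a rough standalone sketch of the conversion exercised in the loop above, using a trimmed copy of the test's single-link data and stubbing the OVS check exactly as the class decorator does:

    from unittest import mock

    from cloudinit.sources.helpers import openstack

    net_json = {
        "links": [{
            "ethernet_mac_address": "fa:16:3e:9c:bf:3d",
            "id": "tapcd9f6d46-4a", "mtu": None, "type": "bridge",
            "vif_id": "cd9f6d46-4a3a-43ab-a466-994af9db96fc",
        }],
        "networks": [{
            "id": "network0", "link": "tapcd9f6d46-4a",
            "network_id": "99e88329-f20d-4741-9593-25bf07847b16",
            "type": "ipv4_dhcp",
        }],
        "services": [{"address": "8.8.8.8", "type": "dns"}],
    }
    # Every type in KNOWN_PHYSICAL_TYPES normalizes to a version-1 "physical"
    # entry, named via the known_macs MAC-to-NIC mapping.
    with mock.patch(
        "cloudinit.net.is_openvswitch_internal_interface", return_value=False
    ):
        cfg = openstack.convert_net_json(
            network_json=net_json, known_macs={"fa:16:3e:9c:bf:3d": "eth0"}
        )
    assert cfg["config"][0]["type"] == "physical"
    assert cfg["config"][0]["name"] == "eth0"
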
diff --git a/tests/unittests/sources/test_aliyun.py b/tests/unittests/sources/test_aliyun.py
new file mode 100644
index 00000000..8a61d5ee
--- /dev/null
+++ b/tests/unittests/sources/test_aliyun.py
@@ -0,0 +1,287 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import functools
+import os
+from unittest import mock
+
+import httpretty
+
+from cloudinit import helpers
+from cloudinit.sources import DataSourceAliYun as ay
+from cloudinit.sources.DataSourceEc2 import convert_ec2_metadata_network_config
+from tests.unittests import helpers as test_helpers
+
+DEFAULT_METADATA = {
+ "instance-id": "aliyun-test-vm-00",
+ "eipv4": "10.0.0.1",
+ "hostname": "test-hostname",
+ "image-id": "m-test",
+ "launch-index": "0",
+ "mac": "00:16:3e:00:00:00",
+ "network-type": "vpc",
+ "private-ipv4": "192.168.0.1",
+ "serial-number": "test-string",
+ "vpc-cidr-block": "192.168.0.0/16",
+ "vpc-id": "test-vpc",
+ "vswitch-id": "test-vpc",
+ "vswitch-cidr-block": "192.168.0.0/16",
+ "zone-id": "test-zone-1",
+ "ntp-conf": {
+ "ntp_servers": [
+ "ntp1.aliyun.com",
+ "ntp2.aliyun.com",
+ "ntp3.aliyun.com",
+ ]
+ },
+ "source-address": [
+ "http://mirrors.aliyun.com",
+ "http://mirrors.aliyuncs.com",
+ ],
+ "public-keys": {
+ "key-pair-1": {"openssh-key": "ssh-rsa AAAAB3..."},
+ "key-pair-2": {"openssh-key": "ssh-rsa AAAAB3..."},
+ },
+}
+
+DEFAULT_USERDATA = """\
+#cloud-config
+
+hostname: localhost"""
+
+
+def register_mock_metaserver(base_url, data):
+ def register_helper(register, base_url, body):
+ if isinstance(body, str):
+ register(base_url, body)
+ elif isinstance(body, list):
+ register(base_url.rstrip("/"), "\n".join(body) + "\n")
+ elif isinstance(body, dict):
+ if not body:
+ register(
+ base_url.rstrip("/") + "/", "not found", status_code=404
+ )
+ vals = []
+ for k, v in body.items():
+ if isinstance(v, (str, list)):
+ suffix = k.rstrip("/")
+ else:
+ suffix = k.rstrip("/") + "/"
+ vals.append(suffix)
+ url = base_url.rstrip("/") + "/" + suffix
+ register_helper(register, url, v)
+ register(base_url, "\n".join(vals) + "\n")
+
+ register = functools.partial(httpretty.register_uri, httpretty.GET)
+ register_helper(register, base_url, data)
+
+
+class TestAliYunDatasource(test_helpers.HttprettyTestCase):
+ def setUp(self):
+ super(TestAliYunDatasource, self).setUp()
+ cfg = {"datasource": {"AliYun": {"timeout": "1", "max_wait": "1"}}}
+ distro = {}
+ paths = helpers.Paths({"run_dir": self.tmp_dir()})
+ self.ds = ay.DataSourceAliYun(cfg, distro, paths)
+ self.metadata_address = self.ds.metadata_urls[0]
+
+ @property
+ def default_metadata(self):
+ return DEFAULT_METADATA
+
+ @property
+ def default_userdata(self):
+ return DEFAULT_USERDATA
+
+ @property
+ def metadata_url(self):
+ return (
+ os.path.join(
+ self.metadata_address,
+ self.ds.min_metadata_version,
+ "meta-data",
+ )
+ + "/"
+ )
+
+ @property
+ def userdata_url(self):
+ return os.path.join(
+ self.metadata_address, self.ds.min_metadata_version, "user-data"
+ )
+
+ # EC2 provides an instance-identity document which must return 404 here
+ # for this test to pass.
+ @property
+ def default_identity(self):
+ return {}
+
+ @property
+ def identity_url(self):
+ return os.path.join(
+ self.metadata_address,
+ self.ds.min_metadata_version,
+ "dynamic",
+ "instance-identity",
+ )
+
+ def regist_default_server(self):
+ register_mock_metaserver(self.metadata_url, self.default_metadata)
+ register_mock_metaserver(self.userdata_url, self.default_userdata)
+ register_mock_metaserver(self.identity_url, self.default_identity)
+
+ def _test_get_data(self):
+ self.assertEqual(self.ds.metadata, self.default_metadata)
+ self.assertEqual(
+ self.ds.userdata_raw, self.default_userdata.encode("utf8")
+ )
+
+ def _test_get_sshkey(self):
+ pub_keys = [
+ v["openssh-key"]
+ for (_, v) in self.default_metadata["public-keys"].items()
+ ]
+ self.assertEqual(self.ds.get_public_ssh_keys(), pub_keys)
+
+ def _test_get_iid(self):
+ self.assertEqual(
+ self.default_metadata["instance-id"], self.ds.get_instance_id()
+ )
+
+ def _test_host_name(self):
+ self.assertEqual(
+ self.default_metadata["hostname"], self.ds.get_hostname()
+ )
+
+ @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun")
+ def test_with_mock_server(self, m_is_aliyun):
+ m_is_aliyun.return_value = True
+ self.regist_default_server()
+ ret = self.ds.get_data()
+ self.assertEqual(True, ret)
+ self.assertEqual(1, m_is_aliyun.call_count)
+ self._test_get_data()
+ self._test_get_sshkey()
+ self._test_get_iid()
+ self._test_host_name()
+ self.assertEqual("aliyun", self.ds.cloud_name)
+ self.assertEqual("ec2", self.ds.platform)
+ self.assertEqual(
+ "metadata (http://100.100.100.200)", self.ds.subplatform
+ )
+
+ @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun")
+ def test_returns_false_when_not_on_aliyun(self, m_is_aliyun):
+ """If is_aliyun returns false, then get_data should return False."""
+ m_is_aliyun.return_value = False
+ self.regist_default_server()
+ ret = self.ds.get_data()
+ self.assertEqual(1, m_is_aliyun.call_count)
+ self.assertEqual(False, ret)
+
+ def test_parse_public_keys(self):
+ public_keys = {}
+ self.assertEqual(ay.parse_public_keys(public_keys), [])
+
+ public_keys = {"key-pair-0": "ssh-key-0"}
+ self.assertEqual(
+ ay.parse_public_keys(public_keys), [public_keys["key-pair-0"]]
+ )
+
+ public_keys = {"key-pair-0": "ssh-key-0", "key-pair-1": "ssh-key-1"}
+ self.assertEqual(
+ set(ay.parse_public_keys(public_keys)),
+ set([public_keys["key-pair-0"], public_keys["key-pair-1"]]),
+ )
+
+ public_keys = {"key-pair-0": ["ssh-key-0", "ssh-key-1"]}
+ self.assertEqual(
+ ay.parse_public_keys(public_keys), public_keys["key-pair-0"]
+ )
+
+ public_keys = {"key-pair-0": {"openssh-key": []}}
+ self.assertEqual(ay.parse_public_keys(public_keys), [])
+
+ public_keys = {"key-pair-0": {"openssh-key": "ssh-key-0"}}
+ self.assertEqual(
+ ay.parse_public_keys(public_keys),
+ [public_keys["key-pair-0"]["openssh-key"]],
+ )
+
+ public_keys = {
+ "key-pair-0": {"openssh-key": ["ssh-key-0", "ssh-key-1"]}
+ }
+ self.assertEqual(
+ ay.parse_public_keys(public_keys),
+ public_keys["key-pair-0"]["openssh-key"],
+ )
+
+ def test_route_metric_calculated_without_device_number(self):
+ """Test that route-metric code works without `device-number`
+
+ `device-number` is part of EC2 metadata, but not supported on aliyun.
+ Attempting to access it will raise a KeyError.
+
+ LP: #1917875
+ """
+ netcfg = convert_ec2_metadata_network_config(
+ {
+ "interfaces": {
+ "macs": {
+ "06:17:04:d7:26:09": {
+ "interface-id": "eni-e44ef49e",
+ },
+ "06:17:04:d7:26:08": {
+ "interface-id": "eni-e44ef49f",
+ },
+ }
+ }
+ },
+ macs_to_nics={
+ "06:17:04:d7:26:09": "eth0",
+ "06:17:04:d7:26:08": "eth1",
+ },
+ )
+
+ met0 = netcfg["ethernets"]["eth0"]["dhcp4-overrides"]["route-metric"]
+ met1 = netcfg["ethernets"]["eth1"]["dhcp4-overrides"]["route-metric"]
+
+ # route-metric numbers should be 100 apart
+ assert 100 == abs(met0 - met1)
+
+
+class TestIsAliYun(test_helpers.CiTestCase):
+ ALIYUN_PRODUCT = "Alibaba Cloud ECS"
+ read_dmi_data_expected = [mock.call("system-product-name")]
+
+ @mock.patch("cloudinit.sources.DataSourceAliYun.dmi.read_dmi_data")
+ def test_true_on_aliyun_product(self, m_read_dmi_data):
+ """Should return true if the dmi product data has the expected value."""
+ m_read_dmi_data.return_value = self.ALIYUN_PRODUCT
+ ret = ay._is_aliyun()
+ self.assertEqual(
+ self.read_dmi_data_expected, m_read_dmi_data.call_args_list
+ )
+ self.assertEqual(True, ret)
+
+ @mock.patch("cloudinit.sources.DataSourceAliYun.dmi.read_dmi_data")
+ def test_false_on_empty_string(self, m_read_dmi_data):
+ """Should return false when an empty value is returned."""
+ m_read_dmi_data.return_value = ""
+ ret = ay._is_aliyun()
+ self.assertEqual(
+ self.read_dmi_data_expected, m_read_dmi_data.call_args_list
+ )
+ self.assertEqual(False, ret)
+
+ @mock.patch("cloudinit.sources.DataSourceAliYun.dmi.read_dmi_data")
+ def test_false_on_unknown_string(self, m_read_dmi_data):
+ """Should return false on an unrelated string."""
+ m_read_dmi_data.return_value = "cubs win"
+ ret = ay._is_aliyun()
+ self.assertEqual(
+ self.read_dmi_data_expected, m_read_dmi_data.call_args_list
+ )
+ self.assertEqual(False, ret)
+
+
+# vi: ts=4 expandtab
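
A minimal usage sketch of the register_mock_metaserver() helper defined above. The base URL below is hypothetical (the tests build the real one from ds.metadata_urls[0] and ds.min_metadata_version), and httpretty is normally activated by HttprettyTestCase rather than by hand:

    import httpretty

    httpretty.enable()
    register_mock_metaserver(
        "http://100.100.100.200/latest/meta-data/",  # hypothetical URL
        {
            "instance-id": "aliyun-test-vm-00",
            "public-keys": {"key-pair-1": {"openssh-key": "ssh-rsa AAAAB3..."}},
        },
    )
    # GET .../meta-data/ now returns an index listing "instance-id" and
    # "public-keys/"; string leaves are served verbatim and dict values
    # recurse into further sub-paths, mirroring an EC2-style metadata tree.
    httpretty.disable()
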
diff --git a/tests/unittests/test_datasource/test_altcloud.py b/tests/unittests/sources/test_altcloud.py
index 7a5393ac..44dfafd9 100644
--- a/tests/unittests/test_datasource/test_altcloud.py
+++ b/tests/unittests/sources/test_altcloud.py
@@ -6,54 +6,47 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-'''
+"""
This test file exercises the code in cloudinit/sources/DataSourceAltCloud.py
-'''
+"""
import os
import shutil
import tempfile
-from cloudinit import dmi
-from cloudinit import helpers
-from cloudinit import subp
-from cloudinit import util
-
-from cloudinit.tests.helpers import CiTestCase, mock
-
import cloudinit.sources.DataSourceAltCloud as dsac
+from cloudinit import dmi, helpers, subp, util
+from tests.unittests.helpers import CiTestCase, mock
-OS_UNAME_ORIG = getattr(os, 'uname')
+OS_UNAME_ORIG = getattr(os, "uname")
def _write_user_data_files(mount_dir, value):
- '''
+ """
Populate the deltacloud_user_data_file and the user_data_file
with the given user data.
- '''
- deltacloud_user_data_file = mount_dir + '/deltacloud-user-data.txt'
- user_data_file = mount_dir + '/user-data.txt'
+ """
+ deltacloud_user_data_file = mount_dir + "/deltacloud-user-data.txt"
+ user_data_file = mount_dir + "/user-data.txt"
- udfile = open(deltacloud_user_data_file, 'w')
+ udfile = open(deltacloud_user_data_file, "w")
udfile.write(value)
udfile.close()
os.chmod(deltacloud_user_data_file, 0o664)
- udfile = open(user_data_file, 'w')
+ udfile = open(user_data_file, "w")
udfile.write(value)
udfile.close()
os.chmod(user_data_file, 0o664)
-def _remove_user_data_files(mount_dir,
- dc_file=True,
- non_dc_file=True):
- '''
+def _remove_user_data_files(mount_dir, dc_file=True, non_dc_file=True):
+ """
Remove the test files: deltacloud_user_data_file and
user_data_file
- '''
- deltacloud_user_data_file = mount_dir + '/deltacloud-user-data.txt'
- user_data_file = mount_dir + '/user-data.txt'
+ """
+ deltacloud_user_data_file = mount_dir + "/deltacloud-user-data.txt"
+ user_data_file = mount_dir + "/user-data.txt"
# Ignore any failures removing files that are already gone.
if dc_file:
@@ -70,9 +63,10 @@ def _remove_user_data_files(mount_dir,
def _dmi_data(expected):
- '''
+ """
Spoof the data received over DMI
- '''
+ """
+
def _data(key):
return expected
@@ -80,19 +74,19 @@ def _dmi_data(expected):
class TestGetCloudType(CiTestCase):
- '''Test to exercise method: DataSourceAltCloud.get_cloud_type()'''
+ """Test to exercise method: DataSourceAltCloud.get_cloud_type()"""
with_logs = True
def setUp(self):
- '''Set up.'''
+ """Set up."""
super(TestGetCloudType, self).setUp()
self.tmp = self.tmp_dir()
- self.paths = helpers.Paths({'cloud_dir': self.tmp})
+ self.paths = helpers.Paths({"cloud_dir": self.tmp})
self.dmi_data = dmi.read_dmi_data
# We have a different code path for arm to deal with LP1243287
# We have to switch arch to x86_64 to avoid test failure
- force_arch('x86_64')
+ force_arch("x86_64")
def tearDown(self):
# Reset
@@ -101,216 +95,226 @@ class TestGetCloudType(CiTestCase):
def test_cloud_info_file_ioerror(self):
"""Return UNKNOWN when /etc/sysconfig/cloud-info exists but errors."""
- self.assertEqual('/etc/sysconfig/cloud-info', dsac.CLOUD_INFO_FILE)
+ self.assertEqual("/etc/sysconfig/cloud-info", dsac.CLOUD_INFO_FILE)
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
# Attempting to read the directory generates IOError
- with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.tmp):
- self.assertEqual('UNKNOWN', dsrc.get_cloud_type())
+ with mock.patch.object(dsac, "CLOUD_INFO_FILE", self.tmp):
+ self.assertEqual("UNKNOWN", dsrc.get_cloud_type())
self.assertIn(
- "[Errno 21] Is a directory: '%s'" % self.tmp,
- self.logs.getvalue())
+ "[Errno 21] Is a directory: '%s'" % self.tmp, self.logs.getvalue()
+ )
def test_cloud_info_file(self):
"""Return uppercase stripped content from /etc/sysconfig/cloud-info."""
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
- cloud_info = self.tmp_path('cloud-info', dir=self.tmp)
- util.write_file(cloud_info, ' OverRiDdeN CloudType ')
+ cloud_info = self.tmp_path("cloud-info", dir=self.tmp)
+ util.write_file(cloud_info, " OverRiDdeN CloudType ")
# Attempting to read the directory generates IOError
- with mock.patch.object(dsac, 'CLOUD_INFO_FILE', cloud_info):
- self.assertEqual('OVERRIDDEN CLOUDTYPE', dsrc.get_cloud_type())
+ with mock.patch.object(dsac, "CLOUD_INFO_FILE", cloud_info):
+ self.assertEqual("OVERRIDDEN CLOUDTYPE", dsrc.get_cloud_type())
def test_rhev(self):
- '''
+ """
Test method get_cloud_type() for RHEVm systems.
Forcing read_dmi_data to return a value matching a RHEVm system: RHEV Hypervisor
- '''
- dmi.read_dmi_data = _dmi_data('RHEV')
+ """
+ dmi.read_dmi_data = _dmi_data("RHEV")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
- self.assertEqual('RHEV', dsrc.get_cloud_type())
+ self.assertEqual("RHEV", dsrc.get_cloud_type())
def test_vsphere(self):
- '''
+ """
Test method get_cloud_type() for vSphere systems.
Forcing read_dmi_data to return a value matching a vSphere system: VMware Virtual Platform
- '''
- dmi.read_dmi_data = _dmi_data('VMware Virtual Platform')
+ """
+ dmi.read_dmi_data = _dmi_data("VMware Virtual Platform")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
- self.assertEqual('VSPHERE', dsrc.get_cloud_type())
+ self.assertEqual("VSPHERE", dsrc.get_cloud_type())
def test_unknown(self):
- '''
+ """
Test method get_cloud_type() for unknown systems.
Forcing read_dmi_data to return an unrecognized value.
- '''
- dmi.read_dmi_data = _dmi_data('Unrecognized Platform')
+ """
+ dmi.read_dmi_data = _dmi_data("Unrecognized Platform")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
- self.assertEqual('UNKNOWN', dsrc.get_cloud_type())
+ self.assertEqual("UNKNOWN", dsrc.get_cloud_type())
class TestGetDataCloudInfoFile(CiTestCase):
- '''
+ """
Test to exercise method: DataSourceAltCloud.get_data()
With a contrived CLOUD_INFO_FILE
- '''
+ """
+
def setUp(self):
- '''Set up.'''
+ """Set up."""
self.tmp = self.tmp_dir()
self.paths = helpers.Paths(
- {'cloud_dir': self.tmp, 'run_dir': self.tmp})
- self.cloud_info_file = self.tmp_path('cloud-info', dir=self.tmp)
+ {"cloud_dir": self.tmp, "run_dir": self.tmp}
+ )
+ self.cloud_info_file = self.tmp_path("cloud-info", dir=self.tmp)
def test_rhev(self):
- '''Success Test module get_data() forcing RHEV.'''
+ """Success Test module get_data() forcing RHEV."""
- util.write_file(self.cloud_info_file, 'RHEV')
+ util.write_file(self.cloud_info_file, "RHEV")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
dsrc.user_data_rhevm = lambda: True
- with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file):
+ with mock.patch.object(dsac, "CLOUD_INFO_FILE", self.cloud_info_file):
self.assertEqual(True, dsrc.get_data())
- self.assertEqual('altcloud', dsrc.cloud_name)
- self.assertEqual('altcloud', dsrc.platform_type)
- self.assertEqual('rhev (/dev/fd0)', dsrc.subplatform)
+ self.assertEqual("altcloud", dsrc.cloud_name)
+ self.assertEqual("altcloud", dsrc.platform_type)
+ self.assertEqual("rhev (/dev/fd0)", dsrc.subplatform)
def test_vsphere(self):
- '''Success Test module get_data() forcing VSPHERE.'''
+ """Success Test module get_data() forcing VSPHERE."""
- util.write_file(self.cloud_info_file, 'VSPHERE')
+ util.write_file(self.cloud_info_file, "VSPHERE")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
dsrc.user_data_vsphere = lambda: True
- with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file):
+ with mock.patch.object(dsac, "CLOUD_INFO_FILE", self.cloud_info_file):
self.assertEqual(True, dsrc.get_data())
- self.assertEqual('altcloud', dsrc.cloud_name)
- self.assertEqual('altcloud', dsrc.platform_type)
- self.assertEqual('vsphere (unknown)', dsrc.subplatform)
+ self.assertEqual("altcloud", dsrc.cloud_name)
+ self.assertEqual("altcloud", dsrc.platform_type)
+ self.assertEqual("vsphere (unknown)", dsrc.subplatform)
def test_fail_rhev(self):
- '''Failure Test module get_data() forcing RHEV.'''
+ """Failure Test module get_data() forcing RHEV."""
- util.write_file(self.cloud_info_file, 'RHEV')
+ util.write_file(self.cloud_info_file, "RHEV")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
dsrc.user_data_rhevm = lambda: False
- with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file):
+ with mock.patch.object(dsac, "CLOUD_INFO_FILE", self.cloud_info_file):
self.assertEqual(False, dsrc.get_data())
def test_fail_vsphere(self):
- '''Failure Test module get_data() forcing VSPHERE.'''
+ """Failure Test module get_data() forcing VSPHERE."""
- util.write_file(self.cloud_info_file, 'VSPHERE')
+ util.write_file(self.cloud_info_file, "VSPHERE")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
dsrc.user_data_vsphere = lambda: False
- with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file):
+ with mock.patch.object(dsac, "CLOUD_INFO_FILE", self.cloud_info_file):
self.assertEqual(False, dsrc.get_data())
def test_unrecognized(self):
- '''Failure Test module get_data() forcing unrecognized.'''
+ """Failure Test module get_data() forcing unrecognized."""
- util.write_file(self.cloud_info_file, 'unrecognized')
+ util.write_file(self.cloud_info_file, "unrecognized")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
- with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file):
+ with mock.patch.object(dsac, "CLOUD_INFO_FILE", self.cloud_info_file):
self.assertEqual(False, dsrc.get_data())
class TestGetDataNoCloudInfoFile(CiTestCase):
- '''
+ """
Test to exercise method: DataSourceAltCloud.get_data()
Without a CLOUD_INFO_FILE
- '''
+ """
+
def setUp(self):
- '''Set up.'''
+ """Set up."""
self.tmp = self.tmp_dir()
self.paths = helpers.Paths(
- {'cloud_dir': self.tmp, 'run_dir': self.tmp})
+ {"cloud_dir": self.tmp, "run_dir": self.tmp}
+ )
self.dmi_data = dmi.read_dmi_data
- dsac.CLOUD_INFO_FILE = \
- 'no such file'
+ dsac.CLOUD_INFO_FILE = "no such file"
# We have a different code path for arm to deal with LP1243287
# We have to switch arch to x86_64 to avoid test failure
- force_arch('x86_64')
+ force_arch("x86_64")
def tearDown(self):
# Reset
- dsac.CLOUD_INFO_FILE = \
- '/etc/sysconfig/cloud-info'
+ dsac.CLOUD_INFO_FILE = "/etc/sysconfig/cloud-info"
dmi.read_dmi_data = self.dmi_data
# Return back to original arch
force_arch()
def test_rhev_no_cloud_file(self):
- '''Test No cloud info file module get_data() forcing RHEV.'''
+ """Test No cloud info file module get_data() forcing RHEV."""
- dmi.read_dmi_data = _dmi_data('RHEV Hypervisor')
+ dmi.read_dmi_data = _dmi_data("RHEV Hypervisor")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
dsrc.user_data_rhevm = lambda: True
self.assertEqual(True, dsrc.get_data())
def test_vsphere_no_cloud_file(self):
- '''Test No cloud info file module get_data() forcing VSPHERE.'''
+ """Test No cloud info file module get_data() forcing VSPHERE."""
- dmi.read_dmi_data = _dmi_data('VMware Virtual Platform')
+ dmi.read_dmi_data = _dmi_data("VMware Virtual Platform")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
dsrc.user_data_vsphere = lambda: True
self.assertEqual(True, dsrc.get_data())
def test_failure_no_cloud_file(self):
- '''Test No cloud info file module get_data() forcing unrecognized.'''
+ """Test No cloud info file module get_data() forcing unrecognized."""
- dmi.read_dmi_data = _dmi_data('Unrecognized Platform')
+ dmi.read_dmi_data = _dmi_data("Unrecognized Platform")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual(False, dsrc.get_data())
class TestUserDataRhevm(CiTestCase):
- '''
+ """
Test to exercise method: DataSourceAltCloud.user_data_rhevm()
- '''
+ """
+
def setUp(self):
- '''Set up.'''
- self.paths = helpers.Paths({'cloud_dir': '/tmp'})
+ """Set up."""
+ self.paths = helpers.Paths({"cloud_dir": "/tmp"})
self.mount_dir = self.tmp_dir()
- _write_user_data_files(self.mount_dir, 'test user data')
+ _write_user_data_files(self.mount_dir, "test user data")
self.add_patch(
- 'cloudinit.sources.DataSourceAltCloud.modprobe_floppy',
- 'm_modprobe_floppy', return_value=None)
+ "cloudinit.sources.DataSourceAltCloud.modprobe_floppy",
+ "m_modprobe_floppy",
+ return_value=None,
+ )
self.add_patch(
- 'cloudinit.sources.DataSourceAltCloud.util.udevadm_settle',
- 'm_udevadm_settle', return_value=('', ''))
+ "cloudinit.sources.DataSourceAltCloud.util.udevadm_settle",
+ "m_udevadm_settle",
+ return_value=("", ""),
+ )
self.add_patch(
- 'cloudinit.sources.DataSourceAltCloud.util.mount_cb',
- 'm_mount_cb')
+ "cloudinit.sources.DataSourceAltCloud.util.mount_cb", "m_mount_cb"
+ )
def test_mount_cb_fails(self):
- '''Test user_data_rhevm() where mount_cb fails.'''
+ """Test user_data_rhevm() where mount_cb fails."""
self.m_mount_cb.side_effect = util.MountFailedError("Failed Mount")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual(False, dsrc.user_data_rhevm())
def test_modprobe_fails(self):
- '''Test user_data_rhevm() where modprobe fails.'''
+ """Test user_data_rhevm() where modprobe fails."""
self.m_modprobe_floppy.side_effect = subp.ProcessExecutionError(
- "Failed modprobe")
+ "Failed modprobe"
+ )
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual(False, dsrc.user_data_rhevm())
def test_no_modprobe_cmd(self):
- '''Test user_data_rhevm() with no modprobe command.'''
+ """Test user_data_rhevm() with no modprobe command."""
self.m_modprobe_floppy.side_effect = subp.ProcessExecutionError(
- "No such file or dir")
+ "No such file or dir"
+ )
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual(False, dsrc.user_data_rhevm())
def test_udevadm_fails(self):
- '''Test user_data_rhevm() where udevadm fails.'''
+ """Test user_data_rhevm() where udevadm fails."""
self.m_udevadm_settle.side_effect = subp.ProcessExecutionError(
- "Failed settle.")
+ "Failed settle."
+ )
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual(False, dsrc.user_data_rhevm())
def test_no_udevadm_cmd(self):
- '''Test user_data_rhevm() with no udevadm command.'''
+ """Test user_data_rhevm() with no udevadm command."""
self.m_udevadm_settle.side_effect = OSError("No such file or dir")
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
@@ -318,16 +322,17 @@ class TestUserDataRhevm(CiTestCase):
class TestUserDataVsphere(CiTestCase):
- '''
+ """
Test to exercise method: DataSourceAltCloud.user_data_vsphere()
- '''
+ """
+
def setUp(self):
- '''Set up.'''
+ """Set up."""
self.tmp = self.tmp_dir()
- self.paths = helpers.Paths({'cloud_dir': self.tmp})
+ self.paths = helpers.Paths({"cloud_dir": self.tmp})
self.mount_dir = tempfile.mkdtemp()
- _write_user_data_files(self.mount_dir, 'test user data')
+ _write_user_data_files(self.mount_dir, "test user data")
def tearDown(self):
# Reset
@@ -340,13 +345,12 @@ class TestUserDataVsphere(CiTestCase):
except OSError:
pass
- dsac.CLOUD_INFO_FILE = \
- '/etc/sysconfig/cloud-info'
+ dsac.CLOUD_INFO_FILE = "/etc/sysconfig/cloud-info"
@mock.patch("cloudinit.sources.DataSourceAltCloud.util.find_devs_with")
@mock.patch("cloudinit.sources.DataSourceAltCloud.util.mount_cb")
def test_user_data_vsphere_no_cdrom(self, m_mount_cb, m_find_devs_with):
- '''Test user_data_vsphere() where mount_cb fails.'''
+ """Test user_data_vsphere() where mount_cb fails."""
m_mount_cb.return_value = []
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
@@ -356,7 +360,7 @@ class TestUserDataVsphere(CiTestCase):
@mock.patch("cloudinit.sources.DataSourceAltCloud.util.find_devs_with")
@mock.patch("cloudinit.sources.DataSourceAltCloud.util.mount_cb")
def test_user_data_vsphere_mcb_fail(self, m_mount_cb, m_find_devs_with):
- '''Test user_data_vsphere() where mount_cb fails.'''
+ """Test user_data_vsphere() where mount_cb fails."""
m_find_devs_with.return_value = ["/dev/mock/cdrom"]
m_mount_cb.side_effect = util.MountFailedError("Unable To mount")
@@ -370,28 +374,30 @@ class TestUserDataVsphere(CiTestCase):
def test_user_data_vsphere_success(self, m_mount_cb, m_find_devs_with):
"""Test user_data_vsphere() where successful."""
m_find_devs_with.return_value = ["/dev/mock/cdrom"]
- m_mount_cb.return_value = 'raw userdata from cdrom'
+ m_mount_cb.return_value = "raw userdata from cdrom"
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
- cloud_info = self.tmp_path('cloud-info', dir=self.tmp)
- util.write_file(cloud_info, 'VSPHERE')
+ cloud_info = self.tmp_path("cloud-info", dir=self.tmp)
+ util.write_file(cloud_info, "VSPHERE")
self.assertEqual(True, dsrc.user_data_vsphere())
- m_find_devs_with.assert_called_once_with('LABEL=CDROM')
+ m_find_devs_with.assert_called_once_with("LABEL=CDROM")
m_mount_cb.assert_called_once_with(
- '/dev/mock/cdrom', dsac.read_user_data_callback)
- with mock.patch.object(dsrc, 'get_cloud_type', return_value='VSPHERE'):
- self.assertEqual('vsphere (/dev/mock/cdrom)', dsrc.subplatform)
+ "/dev/mock/cdrom", dsac.read_user_data_callback
+ )
+ with mock.patch.object(dsrc, "get_cloud_type", return_value="VSPHERE"):
+ self.assertEqual("vsphere (/dev/mock/cdrom)", dsrc.subplatform)
class TestReadUserDataCallback(CiTestCase):
- '''
+ """
Test to exercise method: DataSourceAltCloud.read_user_data_callback()
- '''
+ """
+
def setUp(self):
- '''Set up.'''
- self.paths = helpers.Paths({'cloud_dir': '/tmp'})
+ """Set up."""
+ self.paths = helpers.Paths({"cloud_dir": "/tmp"})
self.mount_dir = tempfile.mkdtemp()
- _write_user_data_files(self.mount_dir, 'test user data')
+ _write_user_data_files(self.mount_dir, "test user data")
def tearDown(self):
# Reset
@@ -405,46 +411,49 @@ class TestReadUserDataCallback(CiTestCase):
pass
def test_callback_both(self):
- '''Test read_user_data_callback() with both files.'''
+ """Test read_user_data_callback() with both files."""
- self.assertEqual('test user data',
- dsac.read_user_data_callback(self.mount_dir))
+ self.assertEqual(
+ "test user data", dsac.read_user_data_callback(self.mount_dir)
+ )
def test_callback_dc(self):
- '''Test read_user_data_callback() with only DC file.'''
+ """Test read_user_data_callback() with only DC file."""
- _remove_user_data_files(self.mount_dir,
- dc_file=False,
- non_dc_file=True)
+ _remove_user_data_files(
+ self.mount_dir, dc_file=False, non_dc_file=True
+ )
- self.assertEqual('test user data',
- dsac.read_user_data_callback(self.mount_dir))
+ self.assertEqual(
+ "test user data", dsac.read_user_data_callback(self.mount_dir)
+ )
def test_callback_non_dc(self):
- '''Test read_user_data_callback() with only non-DC file.'''
+ """Test read_user_data_callback() with only non-DC file."""
- _remove_user_data_files(self.mount_dir,
- dc_file=True,
- non_dc_file=False)
+ _remove_user_data_files(
+ self.mount_dir, dc_file=True, non_dc_file=False
+ )
- self.assertEqual('test user data',
- dsac.read_user_data_callback(self.mount_dir))
+ self.assertEqual(
+ "test user data", dsac.read_user_data_callback(self.mount_dir)
+ )
def test_callback_none(self):
- '''Test read_user_data_callback() no files are found.'''
+ """Test read_user_data_callback() no files are found."""
_remove_user_data_files(self.mount_dir)
self.assertIsNone(dsac.read_user_data_callback(self.mount_dir))
def force_arch(arch=None):
-
def _os_uname():
- return ('LINUX', 'NODENAME', 'RELEASE', 'VERSION', arch)
+ return ("LINUX", "NODENAME", "RELEASE", "VERSION", arch)
if arch:
- setattr(os, 'uname', _os_uname)
+ setattr(os, "uname", _os_uname)
elif arch is None:
- setattr(os, 'uname', OS_UNAME_ORIG)
+ setattr(os, "uname", OS_UNAME_ORIG)
+
# vi: ts=4 expandtab
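
For orientation, the two detection inputs these AltCloud tests override, in standalone form. This is only a sketch mirroring the tests: the scratch path is arbitrary, and the tests additionally pin the architecture to x86_64 via force_arch() before relying on DMI.

    from unittest import mock

    import cloudinit.sources.DataSourceAltCloud as dsac
    from cloudinit import dmi, helpers, util

    dsrc = dsac.DataSourceAltCloud({}, None, helpers.Paths({"cloud_dir": "/tmp"}))

    # 1. An /etc/sysconfig/cloud-info file (redirected here to a scratch path)
    #    takes precedence; its content is stripped and upper-cased.
    util.write_file("/tmp/cloud-info", " vsphere ")
    with mock.patch.object(dsac, "CLOUD_INFO_FILE", "/tmp/cloud-info"):
        assert dsrc.get_cloud_type() == "VSPHERE"

    # 2. Without that file, the DMI system product name decides, which the
    #    tests force by replacing dmi.read_dmi_data.
    dmi.read_dmi_data = lambda key: "RHEV Hypervisor"
    assert dsrc.get_cloud_type() == "RHEV"
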
diff --git a/tests/unittests/sources/test_azure.py b/tests/unittests/sources/test_azure.py
new file mode 100644
index 00000000..5f956a63
--- /dev/null
+++ b/tests/unittests/sources/test_azure.py
@@ -0,0 +1,4306 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import copy
+import crypt
+import json
+import os
+import stat
+import xml.etree.ElementTree as ET
+
+import httpretty
+import pytest
+import requests
+import yaml
+
+from cloudinit import distros, helpers, url_helper
+from cloudinit.sources import UNSET
+from cloudinit.sources import DataSourceAzure as dsaz
+from cloudinit.sources import InvalidMetaDataException
+from cloudinit.sources.helpers import netlink
+from cloudinit.util import (
+ MountFailedError,
+ b64e,
+ decode_binary,
+ json_dumps,
+ load_file,
+ load_json,
+ write_file,
+)
+from cloudinit.version import version_string as vs
+from tests.unittests.helpers import (
+ CiTestCase,
+ ExitStack,
+ HttprettyTestCase,
+ mock,
+ populate_dir,
+ resourceLocation,
+ wrap_and_call,
+)
+
+MOCKPATH = "cloudinit.sources.DataSourceAzure."
+
+
+@pytest.fixture
+def azure_ds(paths):
+ """Provide DataSourceAzure instance with mocks for minimal test case."""
+ with mock.patch(MOCKPATH + "_is_platform_viable", return_value=True):
+ yield dsaz.DataSourceAzure(sys_cfg={}, distro=mock.Mock(), paths=paths)
+
+
+@pytest.fixture
+def mock_azure_helper_readurl():
+ with mock.patch(
+ "cloudinit.sources.helpers.azure.url_helper.readurl", autospec=True
+ ) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_azure_get_metadata_from_fabric():
+ with mock.patch(
+ MOCKPATH + "get_metadata_from_fabric",
+ autospec=True,
+ ) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_azure_report_failure_to_fabric():
+ with mock.patch(
+ MOCKPATH + "report_failure_to_fabric",
+ autospec=True,
+ ) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_dmi_read_dmi_data():
+ def fake_read(key: str) -> str:
+ if key == "system-uuid":
+ return "fake-system-uuid"
+ raise RuntimeError()
+
+ with mock.patch(
+ MOCKPATH + "dmi.read_dmi_data",
+ side_effect=fake_read,
+ autospec=True,
+ ) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_net_dhcp_maybe_perform_dhcp_discovery():
+ with mock.patch(
+ "cloudinit.net.dhcp.maybe_perform_dhcp_discovery",
+ return_value=[
+ {
+ "unknown-245": "aa:bb:cc:dd",
+ "interface": "ethBoot0",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ }
+ ],
+ autospec=True,
+ ) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_net_dhcp_EphemeralIPv4Network():
+ with mock.patch(
+ "cloudinit.net.dhcp.EphemeralIPv4Network",
+ autospec=True,
+ ) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_get_interfaces():
+ with mock.patch(MOCKPATH + "net.get_interfaces", return_value=[]) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_get_interface_mac():
+ with mock.patch(
+ MOCKPATH + "net.get_interface_mac",
+ return_value="001122334455",
+ ) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_netlink():
+ with mock.patch(
+ MOCKPATH + "netlink",
+ autospec=True,
+ ) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_os_path_isfile():
+ with mock.patch(MOCKPATH + "os.path.isfile", autospec=True) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_readurl():
+ with mock.patch(MOCKPATH + "readurl", autospec=True) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_subp_subp():
+ with mock.patch(MOCKPATH + "subp.subp", side_effect=[]) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_util_ensure_dir():
+ with mock.patch(
+ MOCKPATH + "util.ensure_dir",
+ autospec=True,
+ ) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_util_find_devs_with():
+ with mock.patch(MOCKPATH + "util.find_devs_with", autospec=True) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_util_load_file():
+ with mock.patch(
+ MOCKPATH + "util.load_file",
+ autospec=True,
+ return_value=b"",
+ ) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_util_mount_cb():
+ with mock.patch(
+ MOCKPATH + "util.mount_cb",
+ autospec=True,
+ return_value=({}, "", {}, {}),
+ ) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_util_write_file():
+ with mock.patch(
+ MOCKPATH + "util.write_file",
+ autospec=True,
+ ) as m:
+ yield m
+
+
+def construct_valid_ovf_env(
+ data=None, pubkeys=None, userdata=None, platform_settings=None
+):
+ if data is None:
+ data = {"HostName": "FOOHOST"}
+ if pubkeys is None:
+ pubkeys = {}
+
+ content = """<?xml version="1.0" encoding="utf-8"?>
+<Environment xmlns="http://schemas.dmtf.org/ovf/environment/1"
+ xmlns:oe="http://schemas.dmtf.org/ovf/environment/1"
+ xmlns:wa="http://schemas.microsoft.com/windowsazure"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+
+ <wa:ProvisioningSection><wa:Version>1.0</wa:Version>
+ <LinuxProvisioningConfigurationSet
+ xmlns="http://schemas.microsoft.com/windowsazure"
+ xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
+ <ConfigurationSetType>LinuxProvisioningConfiguration</ConfigurationSetType>
+ """
+ for key, dval in data.items():
+ if isinstance(dval, dict):
+ val = dict(dval).get("text")
+ attrs = " " + " ".join(
+ [
+ "%s='%s'" % (k, v)
+ for k, v in dict(dval).items()
+ if k != "text"
+ ]
+ )
+ else:
+ val = dval
+ attrs = ""
+ content += "<%s%s>%s</%s>\n" % (key, attrs, val, key)
+
+ if userdata:
+ content += "<UserData>%s</UserData>\n" % (b64e(userdata))
+
+ if pubkeys:
+ content += "<SSH><PublicKeys>\n"
+ for fp, path, value in pubkeys:
+ content += " <PublicKey>"
+ if fp and path:
+ content += "<Fingerprint>%s</Fingerprint><Path>%s</Path>" % (
+ fp,
+ path,
+ )
+ if value:
+ content += "<Value>%s</Value>" % value
+ content += "</PublicKey>\n"
+ content += "</PublicKeys></SSH>"
+ content += """
+ </LinuxProvisioningConfigurationSet>
+ </wa:ProvisioningSection>
+ <wa:PlatformSettingsSection><wa:Version>1.0</wa:Version>
+ <PlatformSettings xmlns="http://schemas.microsoft.com/windowsazure"
+ xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
+ <KmsServerHostname>kms.core.windows.net</KmsServerHostname>
+ <ProvisionGuestAgent>false</ProvisionGuestAgent>
+ <GuestAgentPackageName i:nil="true" />"""
+ if platform_settings:
+ for k, v in platform_settings.items():
+ content += "<%s>%s</%s>\n" % (k, v, k)
+ if "PreprovisionedVMType" not in platform_settings:
+ content += """<PreprovisionedVMType i:nil="true" />"""
+ content += """</PlatformSettings></wa:PlatformSettingsSection>
+</Environment>"""
+
+ return content
+
+
+NETWORK_METADATA = {
+ "compute": {
+ "location": "eastus2",
+ "name": "my-hostname",
+ "offer": "UbuntuServer",
+ "osType": "Linux",
+ "placementGroupId": "",
+ "platformFaultDomain": "0",
+ "platformUpdateDomain": "0",
+ "publisher": "Canonical",
+ "resourceGroupName": "srugroup1",
+ "sku": "19.04-DAILY",
+ "subscriptionId": "12aad61c-6de4-4e53-a6c6-5aff52a83777",
+ "tags": "",
+ "version": "19.04.201906190",
+ "vmId": "ff702a6b-cb6a-4fcd-ad68-b4ce38227642",
+ "vmScaleSetName": "",
+ "vmSize": "Standard_DS1_v2",
+ "zone": "",
+ "publicKeys": [{"keyData": "ssh-rsa key1", "path": "path1"}],
+ },
+ "network": {
+ "interface": [
+ {
+ "macAddress": "000D3A047598",
+ "ipv6": {"ipAddress": []},
+ "ipv4": {
+ "subnet": [{"prefix": "24", "address": "10.0.0.0"}],
+ "ipAddress": [
+ {
+ "privateIpAddress": "10.0.0.4",
+ "publicIpAddress": "104.46.124.81",
+ }
+ ],
+ },
+ }
+ ]
+ },
+}
+
+SECONDARY_INTERFACE = {
+ "macAddress": "220D3A047598",
+ "ipv6": {"ipAddress": []},
+ "ipv4": {
+ "subnet": [{"prefix": "24", "address": "10.0.1.0"}],
+ "ipAddress": [
+ {
+ "privateIpAddress": "10.0.1.5",
+ }
+ ],
+ },
+}
+
+SECONDARY_INTERFACE_NO_IP = {
+ "macAddress": "220D3A047598",
+ "ipv6": {"ipAddress": []},
+ "ipv4": {
+ "subnet": [{"prefix": "24", "address": "10.0.1.0"}],
+ "ipAddress": [],
+ },
+}
+
+IMDS_NETWORK_METADATA = {
+ "interface": [
+ {
+ "macAddress": "000D3A047598",
+ "ipv6": {"ipAddress": []},
+ "ipv4": {
+ "subnet": [{"prefix": "24", "address": "10.0.0.0"}],
+ "ipAddress": [
+ {
+ "privateIpAddress": "10.0.0.4",
+ "publicIpAddress": "104.46.124.81",
+ }
+ ],
+ },
+ }
+ ]
+}
+
+EXAMPLE_UUID = "d0df4c54-4ecb-4a4b-9954-5bdf3ed5c3b8"
+
+
+class TestParseNetworkConfig(CiTestCase):
+
+ maxDiff = None
+ fallback_config = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "eth0",
+ "mac_address": "00:11:22:33:44:55",
+ "params": {"driver": "hv_netsvc"},
+ "subnets": [{"type": "dhcp"}],
+ }
+ ],
+ }
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
+ def test_single_ipv4_nic_configuration(self, m_driver):
+ """parse_network_config emits dhcp on single nic with ipv4"""
+ expected = {
+ "ethernets": {
+ "eth0": {
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": False,
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "set-name": "eth0",
+ }
+ },
+ "version": 2,
+ }
+ self.assertEqual(expected, dsaz.parse_network_config(NETWORK_METADATA))
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
+ def test_increases_route_metric_for_non_primary_nics(self, m_driver):
+ """parse_network_config increases route-metric for each nic"""
+ expected = {
+ "ethernets": {
+ "eth0": {
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": False,
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "set-name": "eth0",
+ },
+ "eth1": {
+ "set-name": "eth1",
+ "match": {"macaddress": "22:0d:3a:04:75:98"},
+ "dhcp6": False,
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 200},
+ },
+ "eth2": {
+ "set-name": "eth2",
+ "match": {"macaddress": "33:0d:3a:04:75:98"},
+ "dhcp6": False,
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 300},
+ },
+ },
+ "version": 2,
+ }
+ imds_data = copy.deepcopy(NETWORK_METADATA)
+ imds_data["network"]["interface"].append(SECONDARY_INTERFACE)
+ third_intf = copy.deepcopy(SECONDARY_INTERFACE)
+ third_intf["macAddress"] = third_intf["macAddress"].replace("22", "33")
+ third_intf["ipv4"]["subnet"][0]["address"] = "10.0.2.0"
+ third_intf["ipv4"]["ipAddress"][0]["privateIpAddress"] = "10.0.2.6"
+ imds_data["network"]["interface"].append(third_intf)
+ self.assertEqual(expected, dsaz.parse_network_config(imds_data))
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
+ def test_ipv4_and_ipv6_route_metrics_match_for_nics(self, m_driver):
+ """parse_network_config emits matching ipv4 and ipv6 route-metrics."""
+ expected = {
+ "ethernets": {
+ "eth0": {
+ "addresses": ["10.0.0.5/24", "2001:dead:beef::2/128"],
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": True,
+ "dhcp6-overrides": {"route-metric": 100},
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "set-name": "eth0",
+ },
+ "eth1": {
+ "set-name": "eth1",
+ "match": {"macaddress": "22:0d:3a:04:75:98"},
+ "dhcp4": True,
+ "dhcp6": False,
+ "dhcp4-overrides": {"route-metric": 200},
+ },
+ "eth2": {
+ "set-name": "eth2",
+ "match": {"macaddress": "33:0d:3a:04:75:98"},
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 300},
+ "dhcp6": True,
+ "dhcp6-overrides": {"route-metric": 300},
+ },
+ },
+ "version": 2,
+ }
+ imds_data = copy.deepcopy(NETWORK_METADATA)
+ nic1 = imds_data["network"]["interface"][0]
+ nic1["ipv4"]["ipAddress"].append({"privateIpAddress": "10.0.0.5"})
+
+ nic1["ipv6"] = {
+ "subnet": [{"address": "2001:dead:beef::16"}],
+ "ipAddress": [
+ {"privateIpAddress": "2001:dead:beef::1"},
+ {"privateIpAddress": "2001:dead:beef::2"},
+ ],
+ }
+ imds_data["network"]["interface"].append(SECONDARY_INTERFACE)
+ third_intf = copy.deepcopy(SECONDARY_INTERFACE)
+ third_intf["macAddress"] = third_intf["macAddress"].replace("22", "33")
+ third_intf["ipv4"]["subnet"][0]["address"] = "10.0.2.0"
+ third_intf["ipv4"]["ipAddress"][0]["privateIpAddress"] = "10.0.2.6"
+ third_intf["ipv6"] = {
+ "subnet": [{"prefix": "64", "address": "2001:dead:beef::2"}],
+ "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}],
+ }
+ imds_data["network"]["interface"].append(third_intf)
+ self.assertEqual(expected, dsaz.parse_network_config(imds_data))
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
+ def test_ipv4_secondary_ips_will_be_static_addrs(self, m_driver):
+ """parse_network_config emits primary ipv4 as dhcp; others are static."""
+ expected = {
+ "ethernets": {
+ "eth0": {
+ "addresses": ["10.0.0.5/24"],
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": True,
+ "dhcp6-overrides": {"route-metric": 100},
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "set-name": "eth0",
+ }
+ },
+ "version": 2,
+ }
+ imds_data = copy.deepcopy(NETWORK_METADATA)
+ nic1 = imds_data["network"]["interface"][0]
+ nic1["ipv4"]["ipAddress"].append({"privateIpAddress": "10.0.0.5"})
+
+ nic1["ipv6"] = {
+ "subnet": [{"prefix": "10", "address": "2001:dead:beef::16"}],
+ "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}],
+ }
+ self.assertEqual(expected, dsaz.parse_network_config(imds_data))
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
+ def test_ipv6_secondary_ips_will_be_static_cidrs(self, m_driver):
+ """parse_network_config emits primary ipv6 as dhcp; others are static."""
+ expected = {
+ "ethernets": {
+ "eth0": {
+ "addresses": ["10.0.0.5/24", "2001:dead:beef::2/10"],
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": True,
+ "dhcp6-overrides": {"route-metric": 100},
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "set-name": "eth0",
+ }
+ },
+ "version": 2,
+ }
+ imds_data = copy.deepcopy(NETWORK_METADATA)
+ nic1 = imds_data["network"]["interface"][0]
+ nic1["ipv4"]["ipAddress"].append({"privateIpAddress": "10.0.0.5"})
+
+ # Secondary ipv6 addresses currently ignored/unconfigured
+ nic1["ipv6"] = {
+ "subnet": [{"prefix": "10", "address": "2001:dead:beef::16"}],
+ "ipAddress": [
+ {"privateIpAddress": "2001:dead:beef::1"},
+ {"privateIpAddress": "2001:dead:beef::2"},
+ ],
+ }
+ self.assertEqual(expected, dsaz.parse_network_config(imds_data))
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver",
+ return_value="hv_netvsc",
+ )
+ def test_match_driver_for_netvsc(self, m_driver):
+ """parse_network_config emits driver when using netvsc."""
+ expected = {
+ "ethernets": {
+ "eth0": {
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": False,
+ "match": {
+ "macaddress": "00:0d:3a:04:75:98",
+ "driver": "hv_netvsc",
+ },
+ "set-name": "eth0",
+ }
+ },
+ "version": 2,
+ }
+ self.assertEqual(expected, dsaz.parse_network_config(NETWORK_METADATA))
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
+ @mock.patch("cloudinit.net.generate_fallback_config")
+ def test_parse_network_config_uses_fallback_cfg_when_no_network_metadata(
+ self, m_fallback_config, m_driver
+ ):
+ """parse_network_config generates fallback network config when the
+ IMDS instance metadata is corrupted/invalid, such as when
+ network metadata is not present.
+ """
+ imds_metadata_missing_network_metadata = copy.deepcopy(
+ NETWORK_METADATA
+ )
+ del imds_metadata_missing_network_metadata["network"]
+ m_fallback_config.return_value = self.fallback_config
+ self.assertEqual(
+ self.fallback_config,
+ dsaz.parse_network_config(imds_metadata_missing_network_metadata),
+ )
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
+ @mock.patch("cloudinit.net.generate_fallback_config")
+ def test_parse_network_config_uses_fallback_cfg_when_no_interface_metadata(
+ self, m_fallback_config, m_driver
+ ):
+ """parse_network_config generates fallback network config when the
+ IMDS instance metadata is corrupted/invalid, such as when
+ network interface metadata is not present.
+ """
+ imds_metadata_missing_interface_metadata = copy.deepcopy(
+ NETWORK_METADATA
+ )
+ del imds_metadata_missing_interface_metadata["network"]["interface"]
+ m_fallback_config.return_value = self.fallback_config
+ self.assertEqual(
+ self.fallback_config,
+ dsaz.parse_network_config(
+ imds_metadata_missing_interface_metadata
+ ),
+ )
+
+
+class TestGetMetadataFromIMDS(HttprettyTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestGetMetadataFromIMDS, self).setUp()
+ self.network_md_url = "{}/instance?api-version=2019-06-01".format(
+ dsaz.IMDS_URL
+ )
+
+ @mock.patch(MOCKPATH + "readurl", autospec=True)
+ def test_get_metadata_uses_instance_url(self, m_readurl):
+ """Make sure readurl is called with the correct url when accessing
+ metadata"""
+ m_readurl.return_value = url_helper.StringResponse(
+ json.dumps(IMDS_NETWORK_METADATA).encode("utf-8")
+ )
+
+ dsaz.get_metadata_from_imds(retries=3, md_type=dsaz.MetadataType.ALL)
+ m_readurl.assert_called_with(
+ "http://169.254.169.254/metadata/instance?api-version=2019-06-01",
+ exception_cb=mock.ANY,
+ headers=mock.ANY,
+ retries=mock.ANY,
+ timeout=mock.ANY,
+ infinite=False,
+ )
+
+ @mock.patch(MOCKPATH + "readurl", autospec=True)
+ def test_get_network_metadata_uses_network_url(self, m_readurl):
+ """Make sure readurl is called with the correct url when accessing
+ network metadata"""
+ m_readurl.return_value = url_helper.StringResponse(
+ json.dumps(IMDS_NETWORK_METADATA).encode("utf-8")
+ )
+
+ dsaz.get_metadata_from_imds(
+ retries=3, md_type=dsaz.MetadataType.NETWORK
+ )
+ m_readurl.assert_called_with(
+ "http://169.254.169.254/metadata/instance/network?api-version="
+ "2019-06-01",
+ exception_cb=mock.ANY,
+ headers=mock.ANY,
+ retries=mock.ANY,
+ timeout=mock.ANY,
+ infinite=False,
+ )
+
+ @mock.patch(MOCKPATH + "readurl", autospec=True)
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4", autospec=True)
+ def test_get_default_metadata_uses_instance_url(self, m_dhcp, m_readurl):
+ """Make sure readurl is called with the correct url when accessing
+ metadata"""
+ m_readurl.return_value = url_helper.StringResponse(
+ json.dumps(IMDS_NETWORK_METADATA).encode("utf-8")
+ )
+
+ dsaz.get_metadata_from_imds(retries=3)
+ m_readurl.assert_called_with(
+ "http://169.254.169.254/metadata/instance?api-version=2019-06-01",
+ exception_cb=mock.ANY,
+ headers=mock.ANY,
+ retries=mock.ANY,
+ timeout=mock.ANY,
+ infinite=False,
+ )
+
+ @mock.patch(MOCKPATH + "readurl", autospec=True)
+ def test_get_metadata_uses_extended_url(self, m_readurl):
+ """Make sure readurl is called with the correct url when accessing
+ metadata"""
+ m_readurl.return_value = url_helper.StringResponse(
+ json.dumps(IMDS_NETWORK_METADATA).encode("utf-8")
+ )
+
+ dsaz.get_metadata_from_imds(
+ retries=3,
+ md_type=dsaz.MetadataType.ALL,
+ api_version="2021-08-01",
+ )
+ m_readurl.assert_called_with(
+ "http://169.254.169.254/metadata/instance?api-version="
+ "2021-08-01&extended=true",
+ exception_cb=mock.ANY,
+ headers=mock.ANY,
+ retries=mock.ANY,
+ timeout=mock.ANY,
+ infinite=False,
+ )
+
+ @mock.patch(MOCKPATH + "readurl", autospec=True)
+ def test_get_metadata_performs_dhcp_when_network_is_down(self, m_readurl):
+ """Perform DHCP setup when nic is not up."""
+ m_readurl.return_value = url_helper.StringResponse(
+ json.dumps(NETWORK_METADATA).encode("utf-8")
+ )
+
+ self.assertEqual(
+ NETWORK_METADATA, dsaz.get_metadata_from_imds(retries=2)
+ )
+
+ self.assertIn(
+ "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time
+ self.logs.getvalue(),
+ )
+
+ m_readurl.assert_called_with(
+ self.network_md_url,
+ exception_cb=mock.ANY,
+ headers={"Metadata": "true"},
+ retries=2,
+ timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS,
+ infinite=False,
+ )
+
+ @mock.patch("cloudinit.url_helper.time.sleep")
+ def test_get_metadata_from_imds_empty_when_no_imds_present(self, m_sleep):
+ """Return empty dict when IMDS network metadata is absent."""
+ httpretty.register_uri(
+ httpretty.GET,
+ dsaz.IMDS_URL + "/instance?api-version=2017-12-01",
+ body={},
+ status=404,
+ )
+
+ self.assertEqual({}, dsaz.get_metadata_from_imds(retries=2))
+
+ self.assertEqual([mock.call(1), mock.call(1)], m_sleep.call_args_list)
+ self.assertIn(
+ "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("requests.Session.request")
+ @mock.patch("cloudinit.url_helper.time.sleep")
+ def test_get_metadata_from_imds_retries_on_timeout(
+ self, m_sleep, m_request
+ ):
+ """Retry IMDS network metadata on timeout errors."""
+
+ self.attempt = 0
+ m_request.side_effect = requests.Timeout("Fake Connection Timeout")
+
+ def retry_callback(request, uri, headers):
+ self.attempt += 1
+ raise requests.Timeout("Fake connection timeout")
+
+ httpretty.register_uri(
+ httpretty.GET,
+ dsaz.IMDS_URL + "instance?api-version=2017-12-01",
+ body=retry_callback,
+ )
+
+ self.assertEqual({}, dsaz.get_metadata_from_imds(retries=3))
+
+ self.assertEqual([mock.call(1)] * 3, m_sleep.call_args_list)
+ self.assertIn(
+ "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time
+ self.logs.getvalue(),
+ )
+
+
+class TestAzureDataSource(CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestAzureDataSource, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ # patch cloud_dir, so our 'seed_dir' is guaranteed empty
+ self.paths = helpers.Paths(
+ {"cloud_dir": self.tmp, "run_dir": self.tmp}
+ )
+ self.waagent_d = os.path.join(self.tmp, "var", "lib", "waagent")
+
+ self.patches = ExitStack()
+ self.addCleanup(self.patches.close)
+
+ self.patches.enter_context(
+ mock.patch.object(dsaz, "_get_random_seed", return_value="wild")
+ )
+
+ self.m_dhcp = self.patches.enter_context(
+ mock.patch.object(
+ dsaz,
+ "EphemeralDHCPv4",
+ autospec=True,
+ )
+ )
+ self.m_dhcp.return_value.lease = {}
+ self.m_dhcp.return_value.iface = "eth4"
+
+ self.m_get_metadata_from_imds = self.patches.enter_context(
+ mock.patch.object(
+ dsaz,
+ "get_metadata_from_imds",
+ mock.MagicMock(return_value=NETWORK_METADATA),
+ )
+ )
+ self.m_fallback_nic = self.patches.enter_context(
+ mock.patch(
+ "cloudinit.sources.net.find_fallback_nic", return_value="eth9"
+ )
+ )
+ self.m_remove_ubuntu_network_scripts = self.patches.enter_context(
+ mock.patch.object(
+ dsaz,
+ "maybe_remove_ubuntu_network_config_scripts",
+ mock.MagicMock(),
+ )
+ )
+ super(TestAzureDataSource, self).setUp()
+
+ def apply_patches(self, patches):
+ for module, name, new in patches:
+ self.patches.enter_context(mock.patch.object(module, name, new))
+
+ def _get_mockds(self):
+ sysctl_out = (
+ "dev.storvsc.3.%pnpinfo: "
+ "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f "
+ "deviceid=f8b3781b-1e82-4818-a1c3-63d806ec15bb\n"
+ )
+ sysctl_out += (
+ "dev.storvsc.2.%pnpinfo: "
+ "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f "
+ "deviceid=f8b3781a-1e82-4818-a1c3-63d806ec15bb\n"
+ )
+ sysctl_out += (
+ "dev.storvsc.1.%pnpinfo: "
+ "classid=32412632-86cb-44a2-9b5c-50d1417354f5 "
+ "deviceid=00000000-0001-8899-0000-000000000000\n"
+ )
+ camctl_devbus = """
+scbus0 on ata0 bus 0
+scbus1 on ata1 bus 0
+scbus2 on blkvsc0 bus 0
+scbus3 on blkvsc1 bus 0
+scbus4 on storvsc2 bus 0
+scbus5 on storvsc3 bus 0
+scbus-1 on xpt0 bus 0
+ """
+ camctl_dev = """
+<Msft Virtual CD/ROM 1.0> at scbus1 target 0 lun 0 (cd0,pass0)
+<Msft Virtual Disk 1.0> at scbus2 target 0 lun 0 (da0,pass1)
+<Msft Virtual Disk 1.0> at scbus3 target 1 lun 0 (da1,pass2)
+ """
+ self.apply_patches(
+ [
+ (
+ dsaz,
+ "get_dev_storvsc_sysctl",
+ mock.MagicMock(return_value=sysctl_out),
+ ),
+ (
+ dsaz,
+ "get_camcontrol_dev_bus",
+ mock.MagicMock(return_value=camctl_devbus),
+ ),
+ (
+ dsaz,
+ "get_camcontrol_dev",
+ mock.MagicMock(return_value=camctl_dev),
+ ),
+ ]
+ )
+ return dsaz
+
+ def _get_ds(
+ self,
+ data,
+ distro="ubuntu",
+ apply_network=None,
+ instance_id=None,
+ write_ovf_to_data_dir: bool = False,
+ write_ovf_to_seed_dir: bool = True,
+ ):
+ def _wait_for_files(flist, _maxwait=None, _naplen=None):
+ data["waited"] = flist
+ return []
+
+ def _load_possible_azure_ds(seed_dir, cache_dir):
+ yield seed_dir
+ yield dsaz.DEFAULT_PROVISIONING_ISO_DEV
+ yield from data.get("dsdevs", [])
+ if cache_dir:
+ yield cache_dir
+
+ seed_dir = os.path.join(self.paths.seed_dir, "azure")
+ if write_ovf_to_seed_dir and data.get("ovfcontent") is not None:
+ populate_dir(seed_dir, {"ovf-env.xml": data["ovfcontent"]})
+
+ if write_ovf_to_data_dir and data.get("ovfcontent") is not None:
+ populate_dir(self.waagent_d, {"ovf-env.xml": data["ovfcontent"]})
+
+ dsaz.BUILTIN_DS_CONFIG["data_dir"] = self.waagent_d
+
+ self.m_is_platform_viable = mock.MagicMock(autospec=True)
+ self.m_get_metadata_from_fabric = mock.MagicMock(return_value=[])
+ self.m_report_failure_to_fabric = mock.MagicMock(autospec=True)
+ self.m_get_interfaces = mock.MagicMock(
+ return_value=[
+ ("dummy0", "9e:65:d6:19:19:01", None, None),
+ ("eth0", "00:15:5d:69:63:ba", "hv_netvsc", "0x3"),
+ ("lo", "00:00:00:00:00:00", None, None),
+ ]
+ )
+ self.m_list_possible_azure_ds = mock.MagicMock(
+ side_effect=_load_possible_azure_ds
+ )
+
+ if instance_id:
+ self.instance_id = instance_id
+ else:
+ self.instance_id = EXAMPLE_UUID
+
+ def _dmi_mocks(key):
+ if key == "system-uuid":
+ return self.instance_id
+ elif key == "chassis-asset-tag":
+ return "7783-7084-3265-9085-8269-3286-77"
+
+ self.apply_patches(
+ [
+ (
+ dsaz,
+ "list_possible_azure_ds",
+ self.m_list_possible_azure_ds,
+ ),
+ (dsaz, "_is_platform_viable", self.m_is_platform_viable),
+ (
+ dsaz,
+ "get_metadata_from_fabric",
+ self.m_get_metadata_from_fabric,
+ ),
+ (
+ dsaz,
+ "report_failure_to_fabric",
+ self.m_report_failure_to_fabric,
+ ),
+ (dsaz, "get_boot_telemetry", mock.MagicMock()),
+ (dsaz, "get_system_info", mock.MagicMock()),
+ (
+ dsaz.net,
+ "get_interface_mac",
+ mock.MagicMock(return_value="00:15:5d:69:63:ba"),
+ ),
+ (
+ dsaz.net,
+ "get_interfaces",
+ self.m_get_interfaces,
+ ),
+ (dsaz.subp, "which", lambda x: True),
+ (
+ dsaz.dmi,
+ "read_dmi_data",
+ mock.MagicMock(side_effect=_dmi_mocks),
+ ),
+ (
+ dsaz.util,
+ "wait_for_files",
+ mock.MagicMock(side_effect=_wait_for_files),
+ ),
+ ]
+ )
+
+ if isinstance(distro, str):
+ distro_cls = distros.fetch(distro)
+ distro = distro_cls(distro, data.get("sys_cfg", {}), self.paths)
+ dsrc = dsaz.DataSourceAzure(
+ data.get("sys_cfg", {}), distro=distro, paths=self.paths
+ )
+ if apply_network is not None:
+ dsrc.ds_cfg["apply_network_config"] = apply_network
+
+ return dsrc
+
+ def _get_and_setup(self, dsrc):
+ ret = dsrc.get_data()
+ if ret:
+ dsrc.setup(True)
+ return ret
+
+ def xml_equals(self, oxml, nxml):
+ """Compare two sets of XML to make sure they are equal"""
+
+ def create_tag_index(xml):
+ et = ET.fromstring(xml)
+ ret = {}
+ for x in et.iter():
+ ret[x.tag] = x
+ return ret
+
+ def tags_exists(x, y):
+ for tag in x.keys():
+ assert tag in y
+ for tag in y.keys():
+ assert tag in x
+
+ def tags_equal(x, y):
+ for x_val in x.values():
+ y_val = y.get(x_val.tag)
+ assert x_val.text == y_val.text
+
+ old_cnt = create_tag_index(oxml)
+ new_cnt = create_tag_index(nxml)
+ tags_exists(old_cnt, new_cnt)
+ tags_equal(old_cnt, new_cnt)
+
+ def xml_notequals(self, oxml, nxml):
+ try:
+ self.xml_equals(oxml, nxml)
+ except AssertionError:
+ return
+ raise AssertionError("XML is the same")
+
+ def test_get_resource_disk(self):
+ ds = self._get_mockds()
+ dev = ds.get_resource_disk_on_freebsd(1)
+ self.assertEqual("da1", dev)
+
+ def test_not_is_platform_viable_seed_should_return_no_datasource(self):
+ """Check seed_dir using _is_platform_viable and return False."""
+ # Return a non-matching asset tag value
+ data = {}
+ dsrc = self._get_ds(data)
+ self.m_is_platform_viable.return_value = False
+ with mock.patch.object(
+ dsrc, "crawl_metadata"
+ ) as m_crawl_metadata, mock.patch.object(
+ dsrc, "_report_failure"
+ ) as m_report_failure:
+ ret = dsrc.get_data()
+ self.m_is_platform_viable.assert_called_with(dsrc.seed_dir)
+ self.assertFalse(ret)
+ # Assert that for non-viable platforms there is no attempt to
+ # crawl metadata or to report failure to Azure.
+ self.assertEqual(0, m_crawl_metadata.call_count)
+ self.assertEqual(0, m_report_failure.call_count)
+
+ def test_platform_viable_but_no_devs_should_return_no_datasource(self):
+ """For platforms where the Azure platform is viable
+ (which is indicated by the matching asset tag),
+ the absence of any devs at all (devs == candidate sources
+ for crawling Azure datasource) is NOT expected.
+ Report failure to Azure as this is an unexpected fatal error.
+ """
+ data = {}
+ dsrc = self._get_ds(data)
+ with mock.patch.object(dsrc, "_report_failure") as m_report_failure:
+ self.m_is_platform_viable.return_value = True
+ ret = dsrc.get_data()
+ self.m_is_platform_viable.assert_called_with(dsrc.seed_dir)
+ self.assertFalse(ret)
+ self.assertEqual(1, m_report_failure.call_count)
+
+ def test_crawl_metadata_exception_returns_no_datasource(self):
+ data = {}
+ dsrc = self._get_ds(data)
+ self.m_is_platform_viable.return_value = True
+ with mock.patch.object(dsrc, "crawl_metadata") as m_crawl_metadata:
+ m_crawl_metadata.side_effect = Exception
+ ret = dsrc.get_data()
+ self.m_is_platform_viable.assert_called_with(dsrc.seed_dir)
+ self.assertEqual(1, m_crawl_metadata.call_count)
+ self.assertFalse(ret)
+
+ def test_crawl_metadata_exception_should_report_failure_with_msg(self):
+ data = {}
+ dsrc = self._get_ds(data)
+ self.m_is_platform_viable.return_value = True
+ with mock.patch.object(
+ dsrc, "crawl_metadata"
+ ) as m_crawl_metadata, mock.patch.object(
+ dsrc, "_report_failure"
+ ) as m_report_failure:
+ m_crawl_metadata.side_effect = Exception
+ dsrc.get_data()
+ self.assertEqual(1, m_crawl_metadata.call_count)
+ m_report_failure.assert_called_once_with(
+ description=dsaz.DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE
+ )
+
+ def test_crawl_metadata_exc_should_log_could_not_crawl_msg(self):
+ data = {}
+ dsrc = self._get_ds(data)
+ self.m_is_platform_viable.return_value = True
+ with mock.patch.object(dsrc, "crawl_metadata") as m_crawl_metadata:
+ m_crawl_metadata.side_effect = Exception
+ dsrc.get_data()
+ self.assertEqual(1, m_crawl_metadata.call_count)
+ self.assertIn(
+ "Could not crawl Azure metadata", self.logs.getvalue()
+ )
+
+ def test_basic_seed_dir(self):
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": {},
+ }
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(dsrc.userdata_raw, "")
+ self.assertEqual(dsrc.metadata["local-hostname"], odata["HostName"])
+ self.assertTrue(
+ os.path.isfile(os.path.join(self.waagent_d, "ovf-env.xml"))
+ )
+ self.assertEqual("azure", dsrc.cloud_name)
+ self.assertEqual("azure", dsrc.platform_type)
+ self.assertEqual(
+ "seed-dir (%s/seed/azure)" % self.tmp, dsrc.subplatform
+ )
+
+ def test_data_dir_without_imds_data(self):
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": {},
+ }
+ dsrc = self._get_ds(
+ data, write_ovf_to_data_dir=True, write_ovf_to_seed_dir=False
+ )
+
+ self.m_get_metadata_from_imds.return_value = {}
+ with mock.patch(MOCKPATH + "util.mount_cb") as m_mount_cb:
+ m_mount_cb.side_effect = [
+ MountFailedError("fail"),
+ ({"local-hostname": "me"}, "ud", {"cfg": ""}, {}),
+ ]
+ ret = dsrc.get_data()
+
+ self.assertTrue(ret)
+ self.assertEqual(dsrc.userdata_raw, "")
+ self.assertEqual(dsrc.metadata["local-hostname"], odata["HostName"])
+ self.assertTrue(
+ os.path.isfile(os.path.join(self.waagent_d, "ovf-env.xml"))
+ )
+ self.assertEqual("azure", dsrc.cloud_name)
+ self.assertEqual("azure", dsrc.platform_type)
+ self.assertEqual("seed-dir (%s)" % self.waagent_d, dsrc.subplatform)
+
+ def test_basic_dev_file(self):
+ """When a device path is used, present that in subplatform."""
+ data = {"sys_cfg": {}, "dsdevs": ["/dev/cd0"]}
+ dsrc = self._get_ds(data)
+ # DSAzure will attempt to mount /dev/sr0 first, which should
+ # fail with mount error since the list of devices doesn't have
+ # /dev/sr0
+ with mock.patch(MOCKPATH + "util.mount_cb") as m_mount_cb:
+ m_mount_cb.side_effect = [
+ MountFailedError("fail"),
+ ({"local-hostname": "me"}, "ud", {"cfg": ""}, {}),
+ ]
+ self.assertTrue(dsrc.get_data())
+ self.assertEqual(dsrc.userdata_raw, "ud")
+ self.assertEqual(dsrc.metadata["local-hostname"], "me")
+ self.assertEqual("azure", dsrc.cloud_name)
+ self.assertEqual("azure", dsrc.platform_type)
+ self.assertEqual("config-disk (/dev/cd0)", dsrc.subplatform)
+
+ def test_get_data_non_ubuntu_will_not_remove_network_scripts(self):
+ """get_data on non-Ubuntu will not remove ubuntu net scripts."""
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": {},
+ }
+
+ dsrc = self._get_ds(data, distro="debian")
+ dsrc.get_data()
+ self.m_remove_ubuntu_network_scripts.assert_not_called()
+
+ def test_get_data_on_ubuntu_will_remove_network_scripts(self):
+ """get_data will remove ubuntu net scripts on Ubuntu distro."""
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+
+ dsrc = self._get_ds(data, distro="ubuntu")
+ dsrc.get_data()
+ self.m_remove_ubuntu_network_scripts.assert_called_once_with()
+
+ def test_get_data_on_ubuntu_will_not_remove_network_scripts_disabled(self):
+ """When apply_network_config false, do not remove scripts on Ubuntu."""
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": False}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+
+ dsrc = self._get_ds(data, distro="ubuntu")
+ dsrc.get_data()
+ self.m_remove_ubuntu_network_scripts.assert_not_called()
+
+ def test_crawl_metadata_returns_structured_data_and_caches_nothing(self):
+ """Return all structured metadata and cache no class attributes."""
+ yaml_cfg = ""
+ odata = {
+ "HostName": "myhost",
+ "UserName": "myuser",
+ "UserData": {"text": "FOOBAR", "encoding": "plain"},
+ "dscfg": {"text": yaml_cfg, "encoding": "plain"},
+ }
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": {},
+ }
+ dsrc = self._get_ds(data)
+ expected_cfg = {
+ "PreprovisionedVMType": None,
+ "PreprovisionedVm": False,
+ "datasource": {"Azure": {}},
+ "system_info": {"default_user": {"name": "myuser"}},
+ }
+ expected_metadata = {
+ "azure_data": {
+ "configurationsettype": "LinuxProvisioningConfiguration"
+ },
+ "imds": NETWORK_METADATA,
+ "instance-id": EXAMPLE_UUID,
+ "local-hostname": "myhost",
+ "random_seed": "wild",
+ }
+
+ crawled_metadata = dsrc.crawl_metadata()
+
+ self.assertCountEqual(
+ crawled_metadata.keys(),
+ ["cfg", "files", "metadata", "userdata_raw"],
+ )
+ self.assertEqual(crawled_metadata["cfg"], expected_cfg)
+ self.assertEqual(
+ list(crawled_metadata["files"].keys()), ["ovf-env.xml"]
+ )
+ self.assertIn(
+ b"<HostName>myhost</HostName>",
+ crawled_metadata["files"]["ovf-env.xml"],
+ )
+ self.assertEqual(crawled_metadata["metadata"], expected_metadata)
+ self.assertEqual(crawled_metadata["userdata_raw"], "FOOBAR")
+ self.assertEqual(dsrc.userdata_raw, None)
+ self.assertEqual(dsrc.metadata, {})
+ self.assertEqual(dsrc._metadata_imds, UNSET)
+ self.assertFalse(
+ os.path.isfile(os.path.join(self.waagent_d, "ovf-env.xml"))
+ )
+
+ def test_crawl_metadata_raises_invalid_metadata_on_error(self):
+ """crawl_metadata raises an exception on invalid ovf-env.xml."""
+ data = {"ovfcontent": "BOGUS", "sys_cfg": {}}
+ dsrc = self._get_ds(data)
+ error_msg = (
+ "BrokenAzureDataSource: Invalid ovf-env.xml:"
+ " syntax error: line 1, column 0"
+ )
+ with self.assertRaises(InvalidMetaDataException) as cm:
+ dsrc.crawl_metadata()
+ self.assertEqual(str(cm.exception), error_msg)
+
+ def test_crawl_metadata_call_imds_once_no_reprovision(self):
+ """If reprovisioning, report ready at the end"""
+ ovfenv = construct_valid_ovf_env(
+ platform_settings={"PreprovisionedVm": "False"}
+ )
+
+ data = {"ovfcontent": ovfenv, "sys_cfg": {}}
+ dsrc = self._get_ds(data)
+ dsrc.crawl_metadata()
+ self.assertEqual(1, self.m_get_metadata_from_imds.call_count)
+
+ @mock.patch("cloudinit.sources.DataSourceAzure.util.write_file")
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready"
+ )
+ @mock.patch("cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds")
+ def test_crawl_metadata_call_imds_twice_with_reprovision(
+ self, poll_imds_func, m_report_ready, m_write
+ ):
+ """If reprovisioning, imds metadata will be fetched twice"""
+ ovfenv = construct_valid_ovf_env(
+ platform_settings={"PreprovisionedVm": "True"}
+ )
+
+ data = {"ovfcontent": ovfenv, "sys_cfg": {}}
+ dsrc = self._get_ds(data)
+ poll_imds_func.return_value = ovfenv
+ dsrc.crawl_metadata()
+ self.assertEqual(2, self.m_get_metadata_from_imds.call_count)
+
+ @mock.patch("cloudinit.sources.DataSourceAzure.util.write_file")
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready"
+ )
+ @mock.patch("cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds")
+ def test_crawl_metadata_on_reprovision_reports_ready(
+ self, poll_imds_func, m_report_ready, m_write
+ ):
+ """If reprovisioning, report ready at the end"""
+ ovfenv = construct_valid_ovf_env(
+ platform_settings={"PreprovisionedVm": "True"}
+ )
+
+ data = {"ovfcontent": ovfenv, "sys_cfg": {}}
+ dsrc = self._get_ds(data)
+ poll_imds_func.return_value = ovfenv
+ dsrc.crawl_metadata()
+ self.assertEqual(1, m_report_ready.call_count)
+
+ @mock.patch("cloudinit.sources.DataSourceAzure.util.write_file")
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready"
+ )
+ @mock.patch("cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds")
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.DataSourceAzure."
+ "_wait_for_all_nics_ready"
+ )
+ def test_crawl_metadata_waits_for_nic_on_savable_vms(
+ self, detect_nics, poll_imds_func, report_ready_func, m_write
+ ):
+ """If reprovisioning, report ready at the end"""
+ ovfenv = construct_valid_ovf_env(
+ platform_settings={
+ "PreprovisionedVMType": "Savable",
+ "PreprovisionedVm": "True",
+ }
+ )
+
+ data = {"ovfcontent": ovfenv, "sys_cfg": {}}
+ dsrc = self._get_ds(data)
+ poll_imds_func.return_value = ovfenv
+ dsrc.crawl_metadata()
+ self.assertEqual(1, report_ready_func.call_count)
+ self.assertEqual(1, detect_nics.call_count)
+
+ @mock.patch("cloudinit.sources.DataSourceAzure.util.write_file")
+ @mock.patch(
+ "cloudinit.sources.helpers.netlink.wait_for_media_disconnect_connect"
+ )
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready",
+ return_value=True,
+ )
+ @mock.patch("cloudinit.sources.DataSourceAzure.readurl")
+ def test_crawl_metadata_on_reprovision_reports_ready_using_lease(
+ self, m_readurl, m_report_ready, m_media_switch, m_write
+ ):
+ """If reprovisioning, report ready using the obtained lease"""
+ ovfenv = construct_valid_ovf_env(
+ platform_settings={"PreprovisionedVm": "True"}
+ )
+
+ data = {"ovfcontent": ovfenv, "sys_cfg": {}}
+ dsrc = self._get_ds(data)
+
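+ # DHCP option 245 ("unknown-245") carries the Azure wireserver
+ # endpoint used when reporting ready.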
+ lease = {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
+ self.m_dhcp.return_value.obtain_lease.return_value = lease
+ m_media_switch.return_value = None
+
+ reprovision_ovfenv = construct_valid_ovf_env()
+ m_readurl.return_value = url_helper.StringResponse(
+ reprovision_ovfenv.encode("utf-8")
+ )
+
+ dsrc.crawl_metadata()
+
+ assert m_report_ready.mock_calls == [
+ mock.call(),
+ mock.call(pubkey_info=None),
+ ]
+
+ def test_waagent_d_has_0700_perms(self):
+ # we expect /var/lib/waagent to be created 0700
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertTrue(os.path.isdir(self.waagent_d))
+ self.assertEqual(stat.S_IMODE(os.stat(self.waagent_d).st_mode), 0o700)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
+ def test_network_config_set_from_imds(self, m_driver):
+ """Datasource.network_config returns IMDS network data."""
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ expected_network_config = {
+ "ethernets": {
+ "eth0": {
+ "set-name": "eth0",
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "dhcp6": False,
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ }
+ },
+ "version": 2,
+ }
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertEqual(expected_network_config, dsrc.network_config)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
+ def test_network_config_set_from_imds_route_metric_for_secondary_nic(
+ self, m_driver
+ ):
+ """Datasource.network_config adds route-metric to secondary nics."""
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ expected_network_config = {
+ "ethernets": {
+ "eth0": {
+ "set-name": "eth0",
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "dhcp6": False,
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ },
+ "eth1": {
+ "set-name": "eth1",
+ "match": {"macaddress": "22:0d:3a:04:75:98"},
+ "dhcp6": False,
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 200},
+ },
+ "eth2": {
+ "set-name": "eth2",
+ "match": {"macaddress": "33:0d:3a:04:75:98"},
+ "dhcp6": False,
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 300},
+ },
+ },
+ "version": 2,
+ }
+ imds_data = copy.deepcopy(NETWORK_METADATA)
+ imds_data["network"]["interface"].append(SECONDARY_INTERFACE)
+ third_intf = copy.deepcopy(SECONDARY_INTERFACE)
+ third_intf["macAddress"] = third_intf["macAddress"].replace("22", "33")
+ third_intf["ipv4"]["subnet"][0]["address"] = "10.0.2.0"
+ third_intf["ipv4"]["ipAddress"][0]["privateIpAddress"] = "10.0.2.6"
+ imds_data["network"]["interface"].append(third_intf)
+
+ self.m_get_metadata_from_imds.return_value = imds_data
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertEqual(expected_network_config, dsrc.network_config)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
+ def test_network_config_set_from_imds_for_secondary_nic_no_ip(
+ self, m_driver
+ ):
+ """If an IP address is empty then there should no config for it."""
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ expected_network_config = {
+ "ethernets": {
+ "eth0": {
+ "set-name": "eth0",
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "dhcp6": False,
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ }
+ },
+ "version": 2,
+ }
+ imds_data = copy.deepcopy(NETWORK_METADATA)
+ imds_data["network"]["interface"].append(SECONDARY_INTERFACE_NO_IP)
+ self.m_get_metadata_from_imds.return_value = imds_data
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertEqual(expected_network_config, dsrc.network_config)
+
+ def test_availability_zone_set_from_imds(self):
+ """Datasource.availability returns IMDS platformFaultDomain."""
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertEqual("0", dsrc.availability_zone)
+
+ def test_region_set_from_imds(self):
+ """Datasource.region returns IMDS region location."""
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertEqual("eastus2", dsrc.region)
+
+ def test_sys_cfg_set_never_destroy_ntfs(self):
+ sys_cfg = {
+ "datasource": {
+ "Azure": {"never_destroy_ntfs": "user-supplied-value"}
+ }
+ }
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data={}),
+ "sys_cfg": sys_cfg,
+ }
+
+ dsrc = self._get_ds(data)
+ ret = self._get_and_setup(dsrc)
+ self.assertTrue(ret)
+ self.assertEqual(
+ dsrc.ds_cfg.get(dsaz.DS_CFG_KEY_PRESERVE_NTFS),
+ "user-supplied-value",
+ )
+
+ def test_username_used(self):
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {"ovfcontent": construct_valid_ovf_env(data=odata)}
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(
+ dsrc.cfg["system_info"]["default_user"]["name"], "myuser"
+ )
+
+ def test_password_given(self):
+ odata = {
+ "HostName": "myhost",
+ "UserName": "myuser",
+ "UserPassword": "mypass",
+ }
+ data = {"ovfcontent": construct_valid_ovf_env(data=odata)}
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertIn("default_user", dsrc.cfg["system_info"])
+ defuser = dsrc.cfg["system_info"]["default_user"]
+
+ # default user should be updated to the provided username and
+ # should not be locked.
+ self.assertEqual(defuser["name"], odata["UserName"])
+ self.assertFalse(defuser["lock_passwd"])
+ # passwd is a crypt-formatted string: $id$salt$encrypted.
+ # Hashing the plaintext with everything up to the final '$' as the
+ # salt should reproduce the stored value.
+ pos = defuser["passwd"].rfind("$") + 1
+ self.assertEqual(
+ defuser["passwd"],
+ crypt.crypt(odata["UserPassword"], defuser["passwd"][0:pos]),
+ )
+
+ # the same hashed value should also be present in cfg['password']
+ self.assertEqual(defuser["passwd"], dsrc.cfg["password"])
+
+ def test_user_not_locked_if_password_redacted(self):
+ odata = {
+ "HostName": "myhost",
+ "UserName": "myuser",
+ "UserPassword": dsaz.DEF_PASSWD_REDACTION,
+ }
+ data = {"ovfcontent": construct_valid_ovf_env(data=odata)}
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertIn("default_user", dsrc.cfg["system_info"])
+ defuser = dsrc.cfg["system_info"]["default_user"]
+
+ # default user should be updated to the provided username and
+ # should not be locked.
+ self.assertEqual(defuser["name"], odata["UserName"])
+ self.assertIn("lock_passwd", defuser)
+ self.assertFalse(defuser["lock_passwd"])
+
+ def test_userdata_plain(self):
+ mydata = "FOOBAR"
+ odata = {"UserData": {"text": mydata, "encoding": "plain"}}
+ data = {"ovfcontent": construct_valid_ovf_env(data=odata)}
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(decode_binary(dsrc.userdata_raw), mydata)
+
+ def test_userdata_found(self):
+ mydata = "FOOBAR"
+ odata = {"UserData": {"text": b64e(mydata), "encoding": "base64"}}
+ data = {"ovfcontent": construct_valid_ovf_env(data=odata)}
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(dsrc.userdata_raw, mydata.encode("utf-8"))
+
+ def test_default_ephemeral_configs_ephemeral_exists(self):
+ # make sure the ephemeral configs are correct if disk present
+ odata = {}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": {},
+ }
+
+ orig_exists = dsaz.os.path.exists
+
+ def changed_exists(path):
+ return (
+ True if path == dsaz.RESOURCE_DISK_PATH else orig_exists(path)
+ )
+
+ with mock.patch(MOCKPATH + "os.path.exists", new=changed_exists):
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ cfg = dsrc.get_config_obj()
+
+ self.assertEqual(
+ dsrc.device_name_to_device("ephemeral0"),
+ dsaz.RESOURCE_DISK_PATH,
+ )
+ assert "disk_setup" in cfg
+ assert "fs_setup" in cfg
+ self.assertIsInstance(cfg["disk_setup"], dict)
+ self.assertIsInstance(cfg["fs_setup"], list)
+
+ def test_default_ephemeral_configs_ephemeral_does_not_exist(self):
+ # make sure the ephemeral configs are correct if disk not present
+ odata = {}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": {},
+ }
+
+ orig_exists = dsaz.os.path.exists
+
+ def changed_exists(path):
+ return (
+ False if path == dsaz.RESOURCE_DISK_PATH else orig_exists(path)
+ )
+
+ with mock.patch(MOCKPATH + "os.path.exists", new=changed_exists):
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ cfg = dsrc.get_config_obj()
+
+ assert "disk_setup" not in cfg
+ assert "fs_setup" not in cfg
+
+ def test_provide_disk_aliases(self):
+ # Make sure that user can affect disk aliases
+ dscfg = {"disk_aliases": {"ephemeral0": "/dev/sdc"}}
+ odata = {
+ "HostName": "myhost",
+ "UserName": "myuser",
+ "dscfg": {"text": b64e(yaml.dump(dscfg)), "encoding": "base64"},
+ }
+ usercfg = {
+ "disk_setup": {
+ "/dev/sdc": {"something": "..."},
+ "ephemeral0": False,
+ }
+ }
+ userdata = "#cloud-config" + yaml.dump(usercfg) + "\n"
+
+ ovfcontent = construct_valid_ovf_env(data=odata, userdata=userdata)
+ data = {"ovfcontent": ovfcontent, "sys_cfg": {}}
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ cfg = dsrc.get_config_obj()
+ self.assertTrue(cfg)
+
+ def test_userdata_arrives(self):
+ userdata = "This is my user-data"
+ xml = construct_valid_ovf_env(data={}, userdata=userdata)
+ data = {"ovfcontent": xml}
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+
+ self.assertEqual(userdata.encode("us-ascii"), dsrc.userdata_raw)
+
+ def test_password_redacted_in_ovf(self):
+ odata = {
+ "HostName": "myhost",
+ "UserName": "myuser",
+ "UserPassword": "mypass",
+ }
+ data = {"ovfcontent": construct_valid_ovf_env(data=odata)}
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+
+ self.assertTrue(ret)
+ ovf_env_path = os.path.join(self.waagent_d, "ovf-env.xml")
+
+ # The XML should not be the same since the user password is redacted
+ on_disk_ovf = load_file(ovf_env_path)
+ self.xml_notequals(data["ovfcontent"], on_disk_ovf)
+
+ # Make sure that cloud-init does not use the redacted password from disk
+ self.assertNotEqual(
+ dsrc.cfg.get("password"), dsaz.DEF_PASSWD_REDACTION
+ )
+
+ # Make sure that the password in the on-disk OVF was really redacted
+ et = ET.fromstring(on_disk_ovf)
+ for elem in et.iter():
+ if "UserPassword" in elem.tag:
+ self.assertEqual(dsaz.DEF_PASSWD_REDACTION, elem.text)
+
+ def test_ovf_env_arrives_in_waagent_dir(self):
+ xml = construct_valid_ovf_env(data={}, userdata="FOODATA")
+ dsrc = self._get_ds({"ovfcontent": xml})
+ dsrc.get_data()
+
+ # 'data_dir' is '/var/lib/waagent' (walinux-agent's state dir)
+ # we expect that the ovf-env.xml file is copied there.
+ ovf_env_path = os.path.join(self.waagent_d, "ovf-env.xml")
+ self.assertTrue(os.path.exists(ovf_env_path))
+ self.xml_equals(xml, load_file(ovf_env_path))
+
+ def test_ovf_can_include_unicode(self):
+ xml = construct_valid_ovf_env(data={})
+ xml = "\ufeff{0}".format(xml)
+ dsrc = self._get_ds({"ovfcontent": xml})
+ dsrc.get_data()
+
+ def test_dsaz_report_ready_returns_true_when_report_succeeds(self):
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ assert dsrc._report_ready() == []
+
+ @mock.patch(MOCKPATH + "report_diagnostic_event")
+ def test_dsaz_report_ready_failure_reports_telemetry(self, m_report_diag):
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ self.m_get_metadata_from_fabric.side_effect = Exception("foo")
+
+ with pytest.raises(Exception):
+ dsrc._report_ready()
+
+ assert m_report_diag.mock_calls == [
+ mock.call(
+ "Error communicating with Azure fabric; "
+ "You may experience connectivity issues: foo",
+ logger_func=dsaz.LOG.warning,
+ )
+ ]
+
+ def test_dsaz_report_failure_returns_true_when_report_succeeds(self):
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+
+ with mock.patch.object(dsrc, "crawl_metadata") as m_crawl_metadata:
+ # mock crawl metadata failure to cause report failure
+ m_crawl_metadata.side_effect = Exception
+
+ self.assertTrue(dsrc._report_failure())
+ self.assertEqual(1, self.m_report_failure_to_fabric.call_count)
+
+ def test_dsaz_report_failure_returns_false_and_does_not_propagate_exc(
+ self,
+ ):
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+
+ with mock.patch.object(
+ dsrc, "crawl_metadata"
+ ) as m_crawl_metadata, mock.patch.object(
+ dsrc, "_ephemeral_dhcp_ctx"
+ ) as m_ephemeral_dhcp_ctx, mock.patch.object(
+ dsrc.distro.networking, "is_up"
+ ) as m_dsrc_distro_networking_is_up:
+ # mock crawl metadata failure to cause report failure
+ m_crawl_metadata.side_effect = Exception
+
+ # setup mocks to allow using cached ephemeral dhcp lease
+ m_dsrc_distro_networking_is_up.return_value = True
+ test_lease_dhcp_option_245 = "test_lease_dhcp_option_245"
+ test_lease = {"unknown-245": test_lease_dhcp_option_245}
+ m_ephemeral_dhcp_ctx.lease = test_lease
+
+ # We expect 2 calls to report_failure_to_fabric,
+ # because we try 2 different methods of calling report failure.
+ # The different methods are attempted in the following order:
+ # 1. Using cached ephemeral dhcp context to report failure to Azure
+ # 2. Using new ephemeral dhcp to report failure to Azure
+ self.m_report_failure_to_fabric.side_effect = Exception
+ self.assertFalse(dsrc._report_failure())
+ self.assertEqual(2, self.m_report_failure_to_fabric.call_count)
+
+ def test_dsaz_report_failure_description_msg(self):
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+
+ with mock.patch.object(dsrc, "crawl_metadata") as m_crawl_metadata:
+ # mock crawl metadata failure to cause report failure
+ m_crawl_metadata.side_effect = Exception
+
+ test_msg = "Test report failure description message"
+ self.assertTrue(dsrc._report_failure(description=test_msg))
+ self.m_report_failure_to_fabric.assert_called_once_with(
+ dhcp_opts=mock.ANY, description=test_msg
+ )
+
+ def test_dsaz_report_failure_no_description_msg(self):
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+
+ with mock.patch.object(dsrc, "crawl_metadata") as m_crawl_metadata:
+ m_crawl_metadata.side_effect = Exception
+
+ self.assertTrue(dsrc._report_failure()) # no description msg
+ self.m_report_failure_to_fabric.assert_called_once_with(
+ dhcp_opts=mock.ANY, description=None
+ )
+
+ def test_dsaz_report_failure_uses_cached_ephemeral_dhcp_ctx_lease(self):
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+
+ with mock.patch.object(
+ dsrc, "crawl_metadata"
+ ) as m_crawl_metadata, mock.patch.object(
+ dsrc, "_wireserver_endpoint", return_value="test-ep"
+ ) as m_wireserver_endpoint:
+ # mock crawl metadata failure to cause report failure
+ m_crawl_metadata.side_effect = Exception
+
+ self.assertTrue(dsrc._report_failure())
+
+ # ensure called with cached ephemeral dhcp lease option 245
+ self.m_report_failure_to_fabric.assert_called_once_with(
+ description=mock.ANY, dhcp_opts=m_wireserver_endpoint
+ )
+
+ def test_dsaz_report_failure_no_net_uses_new_ephemeral_dhcp_lease(self):
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+
+ with mock.patch.object(dsrc, "crawl_metadata") as m_crawl_metadata:
+ # mock crawl metadata failure to cause report failure
+ m_crawl_metadata.side_effect = Exception
+
+ test_lease_dhcp_option_245 = "test_lease_dhcp_option_245"
+ test_lease = {
+ "unknown-245": test_lease_dhcp_option_245,
+ "interface": "eth0",
+ }
+ self.m_dhcp.return_value.obtain_lease.return_value = test_lease
+
+ self.assertTrue(dsrc._report_failure())
+
+ # ensure called with the newly discovered
+ # ephemeral dhcp lease option 245
+ self.m_report_failure_to_fabric.assert_called_once_with(
+ description=mock.ANY, dhcp_opts=test_lease_dhcp_option_245
+ )
+
+ def test_exception_fetching_fabric_data_doesnt_propagate(self):
+ """Errors communicating with fabric should warn, but return True."""
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ self.m_get_metadata_from_fabric.side_effect = Exception
+ ret = self._get_and_setup(dsrc)
+ self.assertTrue(ret)
+
+ def test_fabric_data_included_in_metadata(self):
+ dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ self.m_get_metadata_from_fabric.return_value = ["ssh-key-value"]
+ ret = self._get_and_setup(dsrc)
+ self.assertTrue(ret)
+ self.assertEqual(["ssh-key-value"], dsrc.metadata["public-keys"])
+
+ def test_instance_id_case_insensitive(self):
+ """Return the previous iid when current is a case-insensitive match."""
+ lower_iid = EXAMPLE_UUID.lower()
+ upper_iid = EXAMPLE_UUID.upper()
+ # lowercase current UUID
+ ds = self._get_ds(
+ {"ovfcontent": construct_valid_ovf_env()}, instance_id=lower_iid
+ )
+ # UPPERCASE previous
+ write_file(
+ os.path.join(self.paths.cloud_dir, "data", "instance-id"),
+ upper_iid,
+ )
+ ds.get_data()
+ self.assertEqual(upper_iid, ds.metadata["instance-id"])
+
+ # UPPERCASE current UUID
+ ds = self._get_ds(
+ {"ovfcontent": construct_valid_ovf_env()}, instance_id=upper_iid
+ )
+ # lowercase previous
+ write_file(
+ os.path.join(self.paths.cloud_dir, "data", "instance-id"),
+ lower_iid,
+ )
+ ds.get_data()
+ self.assertEqual(lower_iid, ds.metadata["instance-id"])
+
+ def test_instance_id_endianness(self):
+ """Return the previous iid when dmi uuid is the byteswapped iid."""
+ ds = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ # byte-swapped previous
+ write_file(
+ os.path.join(self.paths.cloud_dir, "data", "instance-id"),
+ "544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8",
+ )
+ ds.get_data()
+ self.assertEqual(
+ "544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8", ds.metadata["instance-id"]
+ )
+ # not byte-swapped previous
+ write_file(
+ os.path.join(self.paths.cloud_dir, "data", "instance-id"),
+ "644CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8",
+ )
+ ds.get_data()
+ self.assertEqual(self.instance_id, ds.metadata["instance-id"])
+
+ def test_instance_id_from_dmidecode_used(self):
+ ds = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ ds.get_data()
+ self.assertEqual(self.instance_id, ds.metadata["instance-id"])
+
+ def test_instance_id_from_dmidecode_used_for_builtin(self):
+ ds = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ ds.get_data()
+ self.assertEqual(self.instance_id, ds.metadata["instance-id"])
+
+ @mock.patch(MOCKPATH + "util.is_FreeBSD")
+ @mock.patch(MOCKPATH + "_check_freebsd_cdrom")
+ def test_list_possible_azure_ds(self, m_check_fbsd_cdrom, m_is_FreeBSD):
+ """On FreeBSD, possible devs should show /dev/cd0."""
+ m_is_FreeBSD.return_value = True
+ m_check_fbsd_cdrom.return_value = True
+ possible_ds = []
+ for src in dsaz.list_possible_azure_ds("seed_dir", "cache_dir"):
+ possible_ds.append(src)
+ self.assertEqual(
+ possible_ds,
+ [
+ "seed_dir",
+ dsaz.DEFAULT_PROVISIONING_ISO_DEV,
+ "/dev/cd0",
+ "cache_dir",
+ ],
+ )
+ self.assertEqual(
+ [mock.call("/dev/cd0")], m_check_fbsd_cdrom.call_args_list
+ )
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
+ )
+ @mock.patch("cloudinit.net.generate_fallback_config")
+ def test_imds_network_config(self, mock_fallback, m_driver):
+ """Network config is generated from IMDS network data when present."""
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+
+ expected_cfg = {
+ "ethernets": {
+ "eth0": {
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": False,
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "set-name": "eth0",
+ }
+ },
+ "version": 2,
+ }
+
+ self.assertEqual(expected_cfg, dsrc.network_config)
+ mock_fallback.assert_not_called()
+
+ @mock.patch("cloudinit.net.get_interface_mac")
+ @mock.patch("cloudinit.net.get_devicelist")
+ @mock.patch("cloudinit.net.device_driver")
+ @mock.patch("cloudinit.net.generate_fallback_config")
+ def test_imds_network_ignored_when_apply_network_config_false(
+ self, mock_fallback, mock_dd, mock_devlist, mock_get_mac
+ ):
+ """When apply_network_config is False, use fallback instead of IMDS."""
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": False}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ fallback_config = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "eth0",
+ "mac_address": "00:11:22:33:44:55",
+ "params": {"driver": "hv_netsvc"},
+ "subnets": [{"type": "dhcp"}],
+ }
+ ],
+ }
+ mock_fallback.return_value = fallback_config
+
+ mock_devlist.return_value = ["eth0"]
+ mock_dd.return_value = ["hv_netsvc"]
+ mock_get_mac.return_value = "00:11:22:33:44:55"
+
+ dsrc = self._get_ds(data)
+ self.assertTrue(dsrc.get_data())
+ self.assertEqual(dsrc.network_config, fallback_config)
+
+ @mock.patch("cloudinit.net.get_interface_mac")
+ @mock.patch("cloudinit.net.get_devicelist")
+ @mock.patch("cloudinit.net.device_driver")
+ @mock.patch("cloudinit.net.generate_fallback_config", autospec=True)
+ def test_fallback_network_config(
+ self, mock_fallback, mock_dd, mock_devlist, mock_get_mac
+ ):
+ """On absent IMDS network data, generate network fallback config."""
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": {},
+ }
+
+ fallback_config = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "eth0",
+ "mac_address": "00:11:22:33:44:55",
+ "params": {"driver": "hv_netsvc"},
+ "subnets": [{"type": "dhcp"}],
+ }
+ ],
+ }
+ mock_fallback.return_value = fallback_config
+
+ mock_devlist.return_value = ["eth0"]
+ mock_dd.return_value = ["hv_netsvc"]
+ mock_get_mac.return_value = "00:11:22:33:44:55"
+
+ dsrc = self._get_ds(data)
+ # Represent empty response from network imds
+ self.m_get_metadata_from_imds.return_value = {}
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+
+ netconfig = dsrc.network_config
+ self.assertEqual(netconfig, fallback_config)
+ mock_fallback.assert_called_with(
+ blacklist_drivers=["mlx4_core", "mlx5_core"], config_driver=True
+ )
+
+ @mock.patch(MOCKPATH + "net.get_interfaces", autospec=True)
+ def test_blacklist_through_distro(self, m_net_get_interfaces):
+ """Verify Azure DS updates blacklist drivers in the distro's
+ networking object."""
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": {},
+ }
+
+ distro_cls = distros.fetch("ubuntu")
+ distro = distro_cls("ubuntu", {}, self.paths)
+ dsrc = self._get_ds(data, distro=distro)
+ dsrc.get_data()
+ self.assertEqual(
+ distro.networking.blacklist_drivers, dsaz.BLACKLIST_DRIVERS
+ )
+
+ distro.networking.get_interfaces_by_mac()
+ self.m_get_interfaces.assert_called_with(
+ blacklist_drivers=dsaz.BLACKLIST_DRIVERS
+ )
+
+ @mock.patch(
+ "cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates"
+ )
+ def test_get_public_ssh_keys_with_imds(self, m_parse_certificates):
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ dsrc.setup(True)
+ ssh_keys = dsrc.get_public_ssh_keys()
+ self.assertEqual(ssh_keys, ["ssh-rsa key1"])
+ self.assertEqual(m_parse_certificates.call_count, 0)
+
+ def test_key_without_crlf_valid(self):
+ test_key = "ssh-rsa somerandomkeystuff some comment"
+ assert True is dsaz._key_is_openssh_formatted(test_key)
+
+ def test_key_with_crlf_invalid(self):
+ test_key = "ssh-rsa someran\r\ndomkeystuff some comment"
+ assert False is dsaz._key_is_openssh_formatted(test_key)
+
+ def test_key_endswith_crlf_valid(self):
+ test_key = "ssh-rsa somerandomkeystuff some comment\r\n"
+ assert True is dsaz._key_is_openssh_formatted(test_key)
+
+ @mock.patch(
+ "cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates"
+ )
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
+ def test_get_public_ssh_keys_with_no_openssh_format(
+ self, m_get_metadata_from_imds, m_parse_certificates
+ ):
+ imds_data = copy.deepcopy(NETWORK_METADATA)
+ imds_data["compute"]["publicKeys"][0]["keyData"] = "no-openssh-format"
+ m_get_metadata_from_imds.return_value = imds_data
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ dsrc.setup(True)
+ ssh_keys = dsrc.get_public_ssh_keys()
+ self.assertEqual(ssh_keys, [])
+ self.assertEqual(m_parse_certificates.call_count, 0)
+
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
+ def test_get_public_ssh_keys_without_imds(self, m_get_metadata_from_imds):
+ m_get_metadata_from_imds.return_value = dict()
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ dsrc = self._get_ds(data)
+ dsaz.get_metadata_from_fabric.return_value = ["key2"]
+ dsrc.get_data()
+ dsrc.setup(True)
+ ssh_keys = dsrc.get_public_ssh_keys()
+ self.assertEqual(ssh_keys, ["key2"])
+
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
+ def test_imds_api_version_wanted_nonexistent(
+ self, m_get_metadata_from_imds
+ ):
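+ # Reject the newest (wanted) IMDS API version so the datasource
+ # falls back to the minimum supported version.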
+ def get_metadata_from_imds_side_eff(*args, **kwargs):
+ if kwargs["api_version"] == dsaz.IMDS_VER_WANT:
+ raise url_helper.UrlError("No IMDS version", code=400)
+ return NETWORK_METADATA
+
+ m_get_metadata_from_imds.side_effect = get_metadata_from_imds_side_eff
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertIsNotNone(dsrc.metadata)
+
+ assert m_get_metadata_from_imds.mock_calls == [
+ mock.call(
+ retries=0,
+ md_type=dsaz.MetadataType.ALL,
+ api_version="2021-08-01",
+ exc_cb=mock.ANY,
+ ),
+ mock.call(
+ retries=10,
+ md_type=dsaz.MetadataType.ALL,
+ api_version="2019-06-01",
+ exc_cb=mock.ANY,
+ infinite=False,
+ ),
+ ]
+
+ @mock.patch(
+ MOCKPATH + "get_metadata_from_imds", return_value=NETWORK_METADATA
+ )
+ def test_imds_api_version_wanted_exists(self, m_get_metadata_from_imds):
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertIsNotNone(dsrc.metadata)
+
+ assert m_get_metadata_from_imds.mock_calls == [
+ mock.call(
+ retries=0,
+ md_type=dsaz.MetadataType.ALL,
+ api_version="2021-08-01",
+ exc_cb=mock.ANY,
+ )
+ ]
+
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
+ def test_hostname_from_imds(self, m_get_metadata_from_imds):
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA)
+ imds_data_with_os_profile["compute"]["osProfile"] = dict(
+ adminUsername="username1",
+ computerName="hostname1",
+ disablePasswordAuthentication="true",
+ )
+ m_get_metadata_from_imds.return_value = imds_data_with_os_profile
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertEqual(dsrc.metadata["local-hostname"], "hostname1")
+
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
+ def test_username_from_imds(self, m_get_metadata_from_imds):
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA)
+ imds_data_with_os_profile["compute"]["osProfile"] = dict(
+ adminUsername="username1",
+ computerName="hostname1",
+ disablePasswordAuthentication="true",
+ )
+ m_get_metadata_from_imds.return_value = imds_data_with_os_profile
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertEqual(
+ dsrc.cfg["system_info"]["default_user"]["name"], "username1"
+ )
+
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
+ def test_disable_password_from_imds(self, m_get_metadata_from_imds):
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA)
+ imds_data_with_os_profile["compute"]["osProfile"] = dict(
+ adminUsername="username1",
+ computerName="hostname1",
+ disablePasswordAuthentication="true",
+ )
+ m_get_metadata_from_imds.return_value = imds_data_with_os_profile
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ self.assertTrue(dsrc.metadata["disable_password"])
+
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
+ def test_userdata_from_imds(self, m_get_metadata_from_imds):
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ odata = {"HostName": "myhost", "UserName": "myuser"}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+ userdata = "userdataImds"
+ imds_data = copy.deepcopy(NETWORK_METADATA)
+ imds_data["compute"]["osProfile"] = dict(
+ adminUsername="username1",
+ computerName="hostname1",
+ disablePasswordAuthentication="true",
+ )
+ imds_data["compute"]["userData"] = b64e(userdata)
+ m_get_metadata_from_imds.return_value = imds_data
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(dsrc.userdata_raw, userdata.encode("utf-8"))
+
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
+ def test_userdata_from_imds_with_customdata_from_OVF(
+ self, m_get_metadata_from_imds
+ ):
+ userdataOVF = "userdataOVF"
+ odata = {
+ "HostName": "myhost",
+ "UserName": "myuser",
+ "UserData": {"text": b64e(userdataOVF), "encoding": "base64"},
+ }
+ sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
+ data = {
+ "ovfcontent": construct_valid_ovf_env(data=odata),
+ "sys_cfg": sys_cfg,
+ }
+
+ userdataImds = "userdataImds"
+ imds_data = copy.deepcopy(NETWORK_METADATA)
+ imds_data["compute"]["osProfile"] = dict(
+ adminUsername="username1",
+ computerName="hostname1",
+ disablePasswordAuthentication="true",
+ )
+ imds_data["compute"]["userData"] = b64e(userdataImds)
+ m_get_metadata_from_imds.return_value = imds_data
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(dsrc.userdata_raw, userdataOVF.encode("utf-8"))
+
+
+class TestLoadAzureDsDir(CiTestCase):
+ """Tests for load_azure_ds_dir."""
+
+ def setUp(self):
+ self.source_dir = self.tmp_dir()
+ super(TestLoadAzureDsDir, self).setUp()
+
+ def test_missing_ovf_env_xml_raises_non_azure_datasource_error(self):
+ """load_azure_ds_dir raises an error When ovf-env.xml doesn't exit."""
+ with self.assertRaises(dsaz.NonAzureDataSource) as context_manager:
+ dsaz.load_azure_ds_dir(self.source_dir)
+ self.assertEqual(
+ "No ovf-env file found", str(context_manager.exception)
+ )
+
+ def test_wb_invalid_ovf_env_xml_calls_read_azure_ovf(self):
+ """load_azure_ds_dir calls read_azure_ovf to parse the xml."""
+ ovf_path = os.path.join(self.source_dir, "ovf-env.xml")
+ with open(ovf_path, "wb") as stream:
+ stream.write(b"invalid xml")
+ with self.assertRaises(dsaz.BrokenAzureDataSource) as context_manager:
+ dsaz.load_azure_ds_dir(self.source_dir)
+ self.assertEqual(
+ "Invalid ovf-env.xml: syntax error: line 1, column 0",
+ str(context_manager.exception),
+ )
+
+
+class TestReadAzureOvf(CiTestCase):
+ def test_invalid_xml_raises_non_azure_ds(self):
+ invalid_xml = "<foo>" + construct_valid_ovf_env(data={})
+ self.assertRaises(
+ dsaz.BrokenAzureDataSource, dsaz.read_azure_ovf, invalid_xml
+ )
+
+ def test_load_with_pubkeys(self):
+ mypklist = [{"fingerprint": "fp1", "path": "path1", "value": ""}]
+ pubkeys = [(x["fingerprint"], x["path"], x["value"]) for x in mypklist]
+ content = construct_valid_ovf_env(pubkeys=pubkeys)
+ (_md, _ud, cfg) = dsaz.read_azure_ovf(content)
+ for mypk in mypklist:
+ self.assertIn(mypk, cfg["_pubkeys"])
+
+
+class TestCanDevBeReformatted(CiTestCase):
+ warning_file = "dataloss_warning_readme.txt"
+
+ def _domock(self, mockpath, sattr=None):
+ patcher = mock.patch(mockpath)
+ setattr(self, sattr, patcher.start())
+ self.addCleanup(patcher.stop)
+
+ def patchup(self, devs):
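+ # Build a lookup of device paths (including realpaths and partitions)
+ # and patch the dsaz helpers to answer from this fake device tree.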
+ bypath = {}
+ for path, data in devs.items():
+ bypath[path] = data
+ if "realpath" in data:
+ bypath[data["realpath"]] = data
+ for ppath, pdata in data.get("partitions", {}).items():
+ bypath[ppath] = pdata
+ if "realpath" in data:
+ bypath[pdata["realpath"]] = pdata
+
+ def realpath(d):
+ return bypath[d].get("realpath", d)
+
+ def partitions_on_device(devpath):
+ parts = bypath.get(devpath, {}).get("partitions", {})
+ ret = []
+ for path, data in parts.items():
+ ret.append((data.get("num"), realpath(path)))
+ # return sorted by partition number
+ return sorted(ret, key=lambda d: d[0])
+
+ def mount_cb(device, callback, mtype, update_env_for_mount):
+ self.assertEqual("ntfs", mtype)
+ self.assertEqual("C", update_env_for_mount.get("LANG"))
+ p = self.tmp_dir()
+ for f in bypath.get(device).get("files", []):
+ write_file(os.path.join(p, f), content=f)
+ return callback(p)
+
+ def has_ntfs_fs(device):
+ return bypath.get(device, {}).get("fs") == "ntfs"
+
+ p = MOCKPATH
+ self._domock(p + "_partitions_on_device", "m_partitions_on_device")
+ self._domock(p + "_has_ntfs_filesystem", "m_has_ntfs_filesystem")
+ self._domock(p + "util.mount_cb", "m_mount_cb")
+ self._domock(p + "os.path.realpath", "m_realpath")
+ self._domock(p + "os.path.exists", "m_exists")
+ self._domock(p + "util.SeLinuxGuard", "m_selguard")
+
+ self.m_exists.side_effect = lambda p: p in bypath
+ self.m_realpath.side_effect = realpath
+ self.m_has_ntfs_filesystem.side_effect = has_ntfs_fs
+ self.m_mount_cb.side_effect = mount_cb
+ self.m_partitions_on_device.side_effect = partitions_on_device
+ self.m_selguard.__enter__ = mock.Mock(return_value=False)
+ self.m_selguard.__exit__ = mock.Mock()
+
+ def test_three_partitions_is_false(self):
+ """A disk with 3 partitions can not be formatted."""
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {"num": 1},
+ "/dev/sda2": {"num": 2},
+ "/dev/sda3": {"num": 3},
+ }
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
+ self.assertFalse(value)
+ self.assertIn("3 or more", msg.lower())
+
+ def test_no_partitions_is_false(self):
+ """A disk with no partitions can not be formatted."""
+ self.patchup({"/dev/sda": {}})
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
+ self.assertFalse(value)
+ self.assertIn("not partitioned", msg.lower())
+
+ def test_two_partitions_not_ntfs_false(self):
+ """2 partitions and 2nd not ntfs can not be formatted."""
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {"num": 1},
+ "/dev/sda2": {"num": 2, "fs": "ext4", "files": []},
+ }
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
+ self.assertFalse(value)
+ self.assertIn("not ntfs", msg.lower())
+
+ def test_two_partitions_ntfs_populated_false(self):
+ """2 partitions and populated ntfs fs on 2nd can not be formatted."""
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {"num": 1},
+ "/dev/sda2": {
+ "num": 2,
+ "fs": "ntfs",
+ "files": ["secret.txt"],
+ },
+ }
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
+ self.assertFalse(value)
+ self.assertIn("files on it", msg.lower())
+
+ def test_two_partitions_ntfs_empty_is_true(self):
+ """2 partitions and empty ntfs fs on 2nd can be formatted."""
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {"num": 1},
+ "/dev/sda2": {"num": 2, "fs": "ntfs", "files": []},
+ }
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
+ self.assertTrue(value)
+ self.assertIn("safe for", msg.lower())
+
+ def test_one_partition_not_ntfs_false(self):
+ """1 partition witih fs other than ntfs can not be formatted."""
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {"num": 1, "fs": "zfs"},
+ }
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
+ self.assertFalse(value)
+ self.assertIn("not ntfs", msg.lower())
+
+ def test_one_partition_ntfs_populated_false(self):
+ """1 mountable ntfs partition with many files can not be formatted."""
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {
+ "num": 1,
+ "fs": "ntfs",
+ "files": ["file1.txt", "file2.exe"],
+ },
+ }
+ }
+ }
+ )
+ with mock.patch.object(dsaz.LOG, "warning") as warning:
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
+ wmsg = warning.call_args[0][0]
+ self.assertIn(
+ "looks like you're using NTFS on the ephemeral disk", wmsg
+ )
+ self.assertFalse(value)
+ self.assertIn("files on it", msg.lower())
+
+ def test_one_partition_ntfs_empty_is_true(self):
+ """1 mountable ntfs partition and no files can be formatted."""
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {"num": 1, "fs": "ntfs", "files": []}
+ }
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
+ self.assertTrue(value)
+ self.assertIn("safe for", msg.lower())
+
+ def test_one_partition_ntfs_empty_with_dataloss_file_is_true(self):
+ """1 mountable ntfs partition and only warn file can be formatted."""
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {
+ "num": 1,
+ "fs": "ntfs",
+ "files": ["dataloss_warning_readme.txt"],
+ }
+ }
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
+ self.assertTrue(value)
+ self.assertIn("safe for", msg.lower())
+
+ def test_one_partition_through_realpath_is_true(self):
+ """A symlink to a device with 1 ntfs partition can be formatted."""
+ epath = "/dev/disk/cloud/azure_resource"
+ self.patchup(
+ {
+ epath: {
+ "realpath": "/dev/sdb",
+ "partitions": {
+ epath
+ + "-part1": {
+ "num": 1,
+ "fs": "ntfs",
+ "files": [self.warning_file],
+ "realpath": "/dev/sdb1",
+ }
+ },
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(epath, preserve_ntfs=False)
+ self.assertTrue(value)
+ self.assertIn("safe for", msg.lower())
+
+ def test_three_partition_through_realpath_is_false(self):
+ """A symlink to a device with 3 partitions can not be formatted."""
+ epath = "/dev/disk/cloud/azure_resource"
+ self.patchup(
+ {
+ epath: {
+ "realpath": "/dev/sdb",
+ "partitions": {
+ epath
+ + "-part1": {
+ "num": 1,
+ "fs": "ntfs",
+ "files": [self.warning_file],
+ "realpath": "/dev/sdb1",
+ },
+ epath
+ + "-part2": {
+ "num": 2,
+ "fs": "ext3",
+ "realpath": "/dev/sdb2",
+ },
+ epath
+ + "-part3": {
+ "num": 3,
+ "fs": "ext",
+ "realpath": "/dev/sdb3",
+ },
+ },
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(epath, preserve_ntfs=False)
+ self.assertFalse(value)
+ self.assertIn("3 or more", msg.lower())
+
+ def test_ntfs_mount_errors_true(self):
+ """can_dev_be_reformatted does not fail if NTFS is unknown fstype."""
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {"num": 1, "fs": "ntfs", "files": []}
+ }
+ }
+ }
+ )
+
+ error_msgs = [
+ "Stderr: mount: unknown filesystem type 'ntfs'", # RHEL
+ "Stderr: mount: /dev/sdb1: unknown filesystem type 'ntfs'", # SLES
+ ]
+
+ for err_msg in error_msgs:
+ self.m_mount_cb.side_effect = MountFailedError(
+ "Failed mounting %s to %s due to: \nUnexpected.\n%s"
+ % ("/dev/sda", "/fake-tmp/dir", err_msg)
+ )
+
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=False
+ )
+ self.assertTrue(value)
+ self.assertIn("cannot mount NTFS, assuming", msg)
+
+ def test_never_destroy_ntfs_config_false(self):
+ """Normally formattable situation with never_destroy_ntfs set."""
+ self.patchup(
+ {
+ "/dev/sda": {
+ "partitions": {
+ "/dev/sda1": {
+ "num": 1,
+ "fs": "ntfs",
+ "files": ["dataloss_warning_readme.txt"],
+ }
+ }
+ }
+ }
+ )
+ value, msg = dsaz.can_dev_be_reformatted(
+ "/dev/sda", preserve_ntfs=True
+ )
+ self.assertFalse(value)
+ self.assertIn(
+ "config says to never destroy NTFS "
+ "(datasource.Azure.never_destroy_ntfs)",
+ msg,
+ )
+
+
+class TestClearCachedData(CiTestCase):
+ def test_clear_cached_attrs_clears_imds(self):
+ """All class attributes are reset to defaults, including imds data."""
+ tmp = self.tmp_dir()
+ paths = helpers.Paths({"cloud_dir": tmp, "run_dir": tmp})
+ dsrc = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=paths)
+ clean_values = [dsrc.metadata, dsrc.userdata, dsrc._metadata_imds]
+ dsrc.metadata = "md"
+ dsrc.userdata = "ud"
+ dsrc._metadata_imds = "imds"
+ dsrc._dirty_cache = True
+ dsrc.clear_cached_attrs()
+ self.assertEqual(
+ [dsrc.metadata, dsrc.userdata, dsrc._metadata_imds], clean_values
+ )
+
+
+class TestAzureNetExists(CiTestCase):
+ def test_azure_net_must_exist_for_legacy_objpkl(self):
+ """DataSourceAzureNet must exist for old obj.pkl files
+ that reference it."""
+ self.assertTrue(hasattr(dsaz, "DataSourceAzureNet"))
+
+
+class TestPreprovisioningReadAzureOvfFlag(CiTestCase):
+ def test_read_azure_ovf_with_true_flag(self):
+ """The read_azure_ovf method should set the PreprovisionedVM
+ cfg flag if the proper setting is present."""
+ content = construct_valid_ovf_env(
+ platform_settings={"PreprovisionedVm": "True"}
+ )
+ ret = dsaz.read_azure_ovf(content)
+ cfg = ret[2]
+ self.assertTrue(cfg["PreprovisionedVm"])
+
+ def test_read_azure_ovf_with_false_flag(self):
+ """The read_azure_ovf method should set the PreprovisionedVM
+ cfg flag to false if the proper setting is false."""
+ content = construct_valid_ovf_env(
+ platform_settings={"PreprovisionedVm": "False"}
+ )
+ ret = dsaz.read_azure_ovf(content)
+ cfg = ret[2]
+ self.assertFalse(cfg["PreprovisionedVm"])
+
+ def test_read_azure_ovf_without_flag(self):
+ """The read_azure_ovf method should not set the
+ PreprovisionedVM cfg flag."""
+ content = construct_valid_ovf_env()
+ ret = dsaz.read_azure_ovf(content)
+ cfg = ret[2]
+ self.assertFalse(cfg["PreprovisionedVm"])
+ self.assertEqual(None, cfg["PreprovisionedVMType"])
+
+ def test_read_azure_ovf_with_running_type(self):
+ """The read_azure_ovf method should set PreprovisionedVMType
+ cfg flag to Running."""
+ content = construct_valid_ovf_env(
+ platform_settings={
+ "PreprovisionedVMType": "Running",
+ "PreprovisionedVm": "True",
+ }
+ )
+ ret = dsaz.read_azure_ovf(content)
+ cfg = ret[2]
+ self.assertTrue(cfg["PreprovisionedVm"])
+ self.assertEqual("Running", cfg["PreprovisionedVMType"])
+
+ def test_read_azure_ovf_with_savable_type(self):
+ """The read_azure_ovf method should set PreprovisionedVMType
+ cfg flag to Savable."""
+ content = construct_valid_ovf_env(
+ platform_settings={
+ "PreprovisionedVMType": "Savable",
+ "PreprovisionedVm": "True",
+ }
+ )
+ ret = dsaz.read_azure_ovf(content)
+ cfg = ret[2]
+ self.assertTrue(cfg["PreprovisionedVm"])
+ self.assertEqual("Savable", cfg["PreprovisionedVMType"])
+
+
+@pytest.mark.parametrize(
+ "ovf_cfg,imds_md,pps_type",
+ [
+ (
+ {"PreprovisionedVm": False, "PreprovisionedVMType": None},
+ {},
+ dsaz.PPSType.NONE,
+ ),
+ (
+ {"PreprovisionedVm": True, "PreprovisionedVMType": "Running"},
+ {},
+ dsaz.PPSType.RUNNING,
+ ),
+ (
+ {"PreprovisionedVm": True, "PreprovisionedVMType": "Savable"},
+ {},
+ dsaz.PPSType.SAVABLE,
+ ),
+ (
+ {"PreprovisionedVm": True},
+ {},
+ dsaz.PPSType.RUNNING,
+ ),
+ (
+ {},
+ {"extended": {"compute": {"ppsType": "None"}}},
+ dsaz.PPSType.NONE,
+ ),
+ (
+ {},
+ {"extended": {"compute": {"ppsType": "Running"}}},
+ dsaz.PPSType.RUNNING,
+ ),
+ (
+ {},
+ {"extended": {"compute": {"ppsType": "Savable"}}},
+ dsaz.PPSType.SAVABLE,
+ ),
+ (
+ {"PreprovisionedVm": False, "PreprovisionedVMType": None},
+ {"extended": {"compute": {"ppsType": "None"}}},
+ dsaz.PPSType.NONE,
+ ),
+ (
+ {"PreprovisionedVm": True, "PreprovisionedVMType": "Running"},
+ {"extended": {"compute": {"ppsType": "Running"}}},
+ dsaz.PPSType.RUNNING,
+ ),
+ (
+ {"PreprovisionedVm": True, "PreprovisionedVMType": "Savable"},
+ {"extended": {"compute": {"ppsType": "Savable"}}},
+ dsaz.PPSType.SAVABLE,
+ ),
+ (
+ {"PreprovisionedVm": True},
+ {"extended": {"compute": {"ppsType": "Running"}}},
+ dsaz.PPSType.RUNNING,
+ ),
+ ],
+)
+class TestDeterminePPSTypeScenarios:
+ @mock.patch("os.path.isfile", return_value=False)
+ def test_determine_pps_without_reprovision_marker(
+ self, is_file, azure_ds, ovf_cfg, imds_md, pps_type
+ ):
+ assert azure_ds._determine_pps_type(ovf_cfg, imds_md) == pps_type
+
+ @mock.patch("os.path.isfile", return_value=True)
+ def test_determine_pps_with_reprovision_marker(
+ self, is_file, azure_ds, ovf_cfg, imds_md, pps_type
+ ):
+ assert (
+ azure_ds._determine_pps_type(ovf_cfg, imds_md)
+ == dsaz.PPSType.UNKNOWN
+ )
+ assert is_file.mock_calls == [mock.call(dsaz.REPROVISION_MARKER_FILE)]
+
+
+@mock.patch("os.path.isfile", return_value=False)
+class TestReprovision(CiTestCase):
+ def setUp(self):
+ super(TestReprovision, self).setUp()
+ tmp = self.tmp_dir()
+ self.waagent_d = self.tmp_path("/var/lib/waagent", tmp)
+ self.paths = helpers.Paths({"cloud_dir": tmp})
+ dsaz.BUILTIN_DS_CONFIG["data_dir"] = self.waagent_d
+
+ @mock.patch(MOCKPATH + "DataSourceAzure._poll_imds")
+ def test_reprovision_calls__poll_imds(self, _poll_imds, isfile):
+ """_reprovision will poll IMDS."""
+ isfile.return_value = False
+ hostname = "myhost"
+ username = "myuser"
+ odata = {"HostName": hostname, "UserName": username}
+ _poll_imds.return_value = construct_valid_ovf_env(data=odata)
+ dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
+ dsa._reprovision()
+ _poll_imds.assert_called_with()
+
+
+class TestPreprovisioningHotAttachNics(CiTestCase):
+ def setUp(self):
+ super(TestPreprovisioningHotAttachNics, self).setUp()
+ self.tmp = self.tmp_dir()
+ self.waagent_d = self.tmp_path("/var/lib/waagent", self.tmp)
+ self.paths = helpers.Paths({"cloud_dir": self.tmp})
+ dsaz.BUILTIN_DS_CONFIG["data_dir"] = self.waagent_d
+
+ @mock.patch(
+ "cloudinit.sources.helpers.netlink.wait_for_nic_detach_event",
+ autospec=True,
+ )
+ @mock.patch(MOCKPATH + "util.write_file", autospec=True)
+ def test_nic_detach_writes_marker(self, m_writefile, m_detach):
+ """When we detect that a nic gets detached, we write a marker for it"""
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ nl_sock = mock.MagicMock()
+ dsa._wait_for_nic_detach(nl_sock)
+ m_detach.assert_called_with(nl_sock)
+ self.assertEqual(1, m_detach.call_count)
+ m_writefile.assert_called_with(
+ dsaz.REPROVISION_NIC_DETACHED_MARKER_FILE, mock.ANY
+ )
+
+ @mock.patch(MOCKPATH + "util.write_file", autospec=True)
+ @mock.patch(MOCKPATH + "DataSourceAzure.fallback_interface")
+ @mock.patch(MOCKPATH + "DataSourceAzure._report_ready")
+ @mock.patch(MOCKPATH + "DataSourceAzure._wait_for_nic_detach")
+ def test_detect_nic_attach_reports_ready_and_waits_for_detach(
+ self, m_detach, m_report_ready, m_fallback_if, m_writefile
+ ):
+ """Report ready first and then wait for nic detach"""
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ dsa._wait_for_all_nics_ready()
+ m_fallback_if.return_value = "Dummy interface"
+ self.assertEqual(1, m_report_ready.call_count)
+ self.assertEqual(1, m_detach.call_count)
+ self.assertEqual(1, m_writefile.call_count)
+ m_writefile.assert_called_with(
+ dsaz.REPORTED_READY_MARKER_FILE, mock.ANY
+ )
+
+ @mock.patch("os.path.isfile")
+ @mock.patch(MOCKPATH + "DataSourceAzure.fallback_interface")
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4", autospec=True)
+ @mock.patch(MOCKPATH + "DataSourceAzure._report_ready")
+ @mock.patch(MOCKPATH + "DataSourceAzure._wait_for_nic_detach")
+ def test_detect_nic_attach_skips_report_ready_when_marker_present(
+ self, m_detach, m_report_ready, m_dhcp, m_fallback_if, m_isfile
+ ):
+ """Skip reporting ready if we already have a marker file."""
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+
+ def isfile(key):
+ return key == dsaz.REPORTED_READY_MARKER_FILE
+
+ m_isfile.side_effect = isfile
+ dsa._wait_for_all_nics_ready()
+ m_fallback_if.return_value = "Dummy interface"
+ self.assertEqual(0, m_report_ready.call_count)
+ self.assertEqual(0, m_dhcp.call_count)
+ self.assertEqual(1, m_detach.call_count)
+
+ @mock.patch("os.path.isfile")
+ @mock.patch(MOCKPATH + "DataSourceAzure.fallback_interface")
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4")
+ @mock.patch(MOCKPATH + "DataSourceAzure._report_ready")
+ @mock.patch(MOCKPATH + "DataSourceAzure._wait_for_nic_detach")
+ def test_detect_nic_attach_skips_nic_detach_when_marker_present(
+ self, m_detach, m_report_ready, m_dhcp, m_fallback_if, m_isfile
+ ):
+ """Skip wait for nic detach if it already happened."""
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+
+ m_isfile.return_value = True
+ dsa._wait_for_all_nics_ready()
+ m_fallback_if.return_value = "Dummy interface"
+ self.assertEqual(0, m_report_ready.call_count)
+ self.assertEqual(0, m_dhcp.call_count)
+ self.assertEqual(0, m_detach.call_count)
+
+ @mock.patch(MOCKPATH + "DataSourceAzure.wait_for_link_up", autospec=True)
+ @mock.patch("cloudinit.sources.helpers.netlink.wait_for_nic_attach_event")
+ @mock.patch("cloudinit.sources.net.find_fallback_nic")
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4", autospec=True)
+ @mock.patch(MOCKPATH + "DataSourceAzure._wait_for_nic_detach")
+ @mock.patch("os.path.isfile")
+ def test_wait_for_nic_attach_if_no_fallback_interface(
+ self,
+ m_isfile,
+ m_detach,
+ m_dhcpv4,
+ m_imds,
+ m_fallback_if,
+ m_attach,
+ m_link_up,
+ ):
+ """Wait for nic attach if we do not have a fallback interface"""
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
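+        # DHCP lease fixture; "unknown-245" is the DHCP option Azure uses to
+        # advertise the (hex-encoded) wireserver endpoint address.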
+ lease = {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
+
+ m_isfile.return_value = True
+ m_attach.return_value = "eth0"
+ dhcp_ctx = mock.MagicMock(lease=lease)
+ dhcp_ctx.obtain_lease.return_value = lease
+ m_dhcpv4.return_value = dhcp_ctx
+ m_imds.return_value = IMDS_NETWORK_METADATA
+ m_fallback_if.return_value = None
+
+ dsa._wait_for_all_nics_ready()
+
+ self.assertEqual(0, m_detach.call_count)
+ self.assertEqual(1, m_attach.call_count)
+ self.assertEqual(1, m_dhcpv4.call_count)
+ self.assertEqual(1, m_imds.call_count)
+ self.assertEqual(1, m_link_up.call_count)
+ m_link_up.assert_called_with(mock.ANY, "eth0")
+
+ @mock.patch(MOCKPATH + "DataSourceAzure.wait_for_link_up")
+ @mock.patch("cloudinit.sources.helpers.netlink.wait_for_nic_attach_event")
+ @mock.patch("cloudinit.sources.net.find_fallback_nic")
+ @mock.patch(MOCKPATH + "DataSourceAzure.get_imds_data_with_api_fallback")
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4", autospec=True)
+ @mock.patch(MOCKPATH + "DataSourceAzure._wait_for_nic_detach")
+ @mock.patch("os.path.isfile")
+ def test_wait_for_nic_attach_multinic_attach(
+ self,
+ m_isfile,
+ m_detach,
+ m_dhcpv4,
+ m_imds,
+ m_fallback_if,
+ m_attach,
+ m_link_up,
+ ):
+        """Wait for each of multiple NICs to attach when there is no
+        fallback interface."""
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ lease = {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
+
+ # Simulate two NICs by adding the same one twice.
+ md = {
+ "interface": [
+ IMDS_NETWORK_METADATA["interface"][0],
+ IMDS_NETWORK_METADATA["interface"][0],
+ ]
+ }
+
+ m_isfile.return_value = True
+ m_attach.side_effect = [
+ "eth0",
+ "eth1",
+ ]
+ dhcp_ctx = mock.MagicMock(lease=lease)
+ dhcp_ctx.obtain_lease.return_value = lease
+ m_dhcpv4.return_value = dhcp_ctx
+ m_imds.side_effect = [md]
+ m_fallback_if.return_value = None
+
+ dsa._wait_for_all_nics_ready()
+
+ self.assertEqual(0, m_detach.call_count)
+ self.assertEqual(2, m_attach.call_count)
+ # DHCP and network metadata calls will only happen on the primary NIC.
+ self.assertEqual(1, m_dhcpv4.call_count)
+ self.assertEqual(1, m_imds.call_count)
+ self.assertEqual(2, m_link_up.call_count)
+
+ @mock.patch("cloudinit.url_helper.time.sleep", autospec=True)
+ @mock.patch("requests.Session.request", autospec=True)
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4", autospec=True)
+ def test_check_if_nic_is_primary_retries_on_failures(
+ self, m_dhcpv4, m_request, m_sleep
+ ):
+        """Retry polling for network metadata on failures; timeout and
+        network-unreachable errors are only retried a limited number of
+        times before giving up."""
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ lease = {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
+
+ # Simulate two NICs by adding the same one twice.
+ md = {
+ "interface": [
+ IMDS_NETWORK_METADATA["interface"][0],
+ IMDS_NETWORK_METADATA["interface"][0],
+ ]
+ }
+
+ m_req = mock.Mock(content=json.dumps(md))
+ m_request.side_effect = [
+ requests.Timeout("Fake connection timeout"),
+ requests.ConnectionError("Fake Network Unreachable"),
+ m_req,
+ ]
+ m_dhcpv4.return_value.lease = lease
+
+ is_primary, expected_nic_count = dsa._check_if_nic_is_primary("eth0")
+ self.assertEqual(True, is_primary)
+ self.assertEqual(2, expected_nic_count)
+ assert len(m_request.mock_calls) == 3
+
+ # Re-run tests to verify max retries.
+ m_request.reset_mock()
+ m_request.side_effect = [
+ requests.Timeout("Fake connection timeout")
+ ] * 6 + [requests.ConnectionError("Fake Network Unreachable")] * 6
+
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+
+ is_primary, expected_nic_count = dsa._check_if_nic_is_primary("eth1")
+ self.assertEqual(False, is_primary)
+ assert len(m_request.mock_calls) == 11
+
+ @mock.patch("cloudinit.distros.networking.LinuxNetworking.try_set_link_up")
+ def test_wait_for_link_up_returns_if_already_up(self, m_is_link_up):
+ """Waiting for link to be up should return immediately if the link is
+ already up."""
+
+ distro_cls = distros.fetch("ubuntu")
+ distro = distro_cls("ubuntu", {}, self.paths)
+ dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
+ m_is_link_up.return_value = True
+
+ dsa.wait_for_link_up("eth0")
+ self.assertEqual(1, m_is_link_up.call_count)
+
+ @mock.patch(MOCKPATH + "net.is_up", autospec=True)
+ @mock.patch(MOCKPATH + "util.write_file")
+ @mock.patch("cloudinit.net.read_sys_net", return_value="device-id")
+ @mock.patch("cloudinit.distros.networking.LinuxNetworking.try_set_link_up")
+ def test_wait_for_link_up_checks_link_after_sleep(
+ self, m_try_set_link_up, m_read_sys_net, m_writefile, m_is_up
+ ):
+        """If the link is not yet up, wait_for_link_up should sleep and
+        check the link state again."""
+
+ distro_cls = distros.fetch("ubuntu")
+ distro = distro_cls("ubuntu", {}, self.paths)
+ dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
+ m_try_set_link_up.return_value = False
+
+ callcount = 0
+
+ def is_up_mock(key):
+ nonlocal callcount
+ if callcount == 0:
+ callcount += 1
+ return False
+ return True
+
+ m_is_up.side_effect = is_up_mock
+
+ with mock.patch("cloudinit.sources.DataSourceAzure.sleep"):
+ dsa.wait_for_link_up("eth0")
+ self.assertEqual(2, m_try_set_link_up.call_count)
+ self.assertEqual(2, m_is_up.call_count)
+
+ @mock.patch(MOCKPATH + "util.write_file")
+ @mock.patch("cloudinit.net.read_sys_net", return_value="device-id")
+ @mock.patch("cloudinit.distros.networking.LinuxNetworking.try_set_link_up")
+ def test_wait_for_link_up_writes_to_device_file(
+ self, m_is_link_up, m_read_sys_net, m_writefile
+ ):
+        """If the link does not come up on the first check, wait_for_link_up
+        should write to the device's sysfs files to try to bring it up."""
+
+ distro_cls = distros.fetch("ubuntu")
+ distro = distro_cls("ubuntu", {}, self.paths)
+ dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
+
+ callcount = 0
+
+ def linkup(key):
+ nonlocal callcount
+ if callcount == 0:
+ callcount += 1
+ return False
+ return True
+
+ m_is_link_up.side_effect = linkup
+
+ dsa.wait_for_link_up("eth0")
+ self.assertEqual(2, m_is_link_up.call_count)
+ self.assertEqual(1, m_read_sys_net.call_count)
+ self.assertEqual(2, m_writefile.call_count)
+
+ @mock.patch(
+ "cloudinit.sources.helpers.netlink.create_bound_netlink_socket"
+ )
+ def test_wait_for_all_nics_ready_raises_if_socket_fails(self, m_socket):
+ """Waiting for all nics should raise exception if netlink socket
+ creation fails."""
+
+ m_socket.side_effect = netlink.NetlinkCreateSocketError
+ distro_cls = distros.fetch("ubuntu")
+ distro = distro_cls("ubuntu", {}, self.paths)
+ dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
+
+ self.assertRaises(
+ netlink.NetlinkCreateSocketError, dsa._wait_for_all_nics_ready
+ )
+
+
+@mock.patch("cloudinit.net.find_fallback_nic", return_value="eth9")
+@mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network")
+@mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+@mock.patch(
+ "cloudinit.sources.helpers.netlink.wait_for_media_disconnect_connect"
+)
+@mock.patch("requests.Session.request")
+@mock.patch(MOCKPATH + "DataSourceAzure._report_ready", return_value=True)
+class TestPreprovisioningPollIMDS(CiTestCase):
+ def setUp(self):
+ super(TestPreprovisioningPollIMDS, self).setUp()
+ self.tmp = self.tmp_dir()
+ self.waagent_d = self.tmp_path("/var/lib/waagent", self.tmp)
+ self.paths = helpers.Paths({"cloud_dir": self.tmp})
+ dsaz.BUILTIN_DS_CONFIG["data_dir"] = self.waagent_d
+
+ @mock.patch("time.sleep", mock.MagicMock())
+ def test_poll_imds_re_dhcp_on_timeout(
+ self,
+ m_report_ready,
+ m_request,
+ m_media_switch,
+ m_dhcp,
+ m_net,
+ m_fallback,
+ ):
+        """_poll_imds retries DHCP when IMDS reads time out or fail."""
+ report_file = self.tmp_path("report_marker", self.tmp)
+ lease = {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
+ m_dhcp.return_value = [lease]
+ m_media_switch.return_value = None
+ dhcp_ctx = mock.MagicMock(lease=lease)
+ dhcp_ctx.obtain_lease.return_value = lease
+
+ self.tries = 0
+
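+        # Simulated IMDS responses: a timeout, an HTTP 404, an HTTP 410,
+        # then a successful read on the fourth attempt.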
+ def fake_timeout_once(**kwargs):
+ self.tries += 1
+ if self.tries == 1:
+ raise requests.Timeout("Fake connection timeout")
+ elif self.tries in (2, 3):
+ response = requests.Response()
+ response.status_code = 404 if self.tries == 2 else 410
+ raise requests.exceptions.HTTPError(
+ "fake {}".format(response.status_code), response=response
+ )
+            # Fourth try succeeds and stops further retries or re-dhcp.
+ return mock.MagicMock(status_code=200, text="good", content="good")
+
+ m_request.side_effect = fake_timeout_once
+
+ dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
+ with mock.patch(MOCKPATH + "REPORTED_READY_MARKER_FILE", report_file):
+ dsa._poll_imds()
+
+ assert m_report_ready.mock_calls == [mock.call()]
+
+ self.assertEqual(3, m_dhcp.call_count, "Expected 3 DHCP calls")
+ self.assertEqual(4, self.tries, "Expected 4 total reads from IMDS")
+
+ @mock.patch("os.path.isfile")
+ def test_poll_imds_skips_dhcp_if_ctx_present(
+ self,
+ m_isfile,
+ report_ready_func,
+ fake_resp,
+ m_media_switch,
+ m_dhcp,
+ m_net,
+ m_fallback,
+ ):
+        """The poll_imds function should reuse the dhcp ctx if it is already
+        present. This happens when we wait for a nic to be hot-attached
+        before polling for reprovision data. Note that if this ctx is set
+        when _poll_imds is called, it is not expected to be waiting for
+        media_disconnect_connect either."""
+ report_file = self.tmp_path("report_marker", self.tmp)
+ m_isfile.return_value = True
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ dsa._ephemeral_dhcp_ctx = mock.Mock(lease={})
+ with mock.patch(MOCKPATH + "REPORTED_READY_MARKER_FILE", report_file):
+ dsa._poll_imds()
+ self.assertEqual(0, m_dhcp.call_count)
+ self.assertEqual(0, m_media_switch.call_count)
+
+ @mock.patch("os.path.isfile")
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4", autospec=True)
+ def test_poll_imds_does_dhcp_on_retries_if_ctx_present(
+ self,
+ m_ephemeral_dhcpv4,
+ m_isfile,
+ report_ready_func,
+ m_request,
+ m_media_switch,
+ m_dhcp,
+ m_net,
+ m_fallback,
+ ):
+        """If an IMDS read fails and is retried while a dhcp ctx is already
+        present, _poll_imds should clean up the existing ctx and bring up a
+        fresh ephemeral DHCP context before retrying, without waiting for
+        media_disconnect_connect."""
+
+ tries = 0
+
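+        # First IMDS read times out, the second succeeds; the test expects
+        # the retry to clean up the existing dhcp ctx and create a new
+        # EphemeralDHCPv4 context.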
+ def fake_timeout_once(**kwargs):
+ nonlocal tries
+ tries += 1
+ if tries == 1:
+ raise requests.Timeout("Fake connection timeout")
+ return mock.MagicMock(status_code=200, text="good", content="good")
+
+ m_request.side_effect = fake_timeout_once
+ report_file = self.tmp_path("report_marker", self.tmp)
+ m_isfile.return_value = True
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ with mock.patch(
+ MOCKPATH + "REPORTED_READY_MARKER_FILE", report_file
+ ), mock.patch.object(dsa, "_ephemeral_dhcp_ctx") as m_dhcp_ctx:
+ m_dhcp_ctx.obtain_lease.return_value = "Dummy lease"
+ dsa._ephemeral_dhcp_ctx = m_dhcp_ctx
+ dsa._poll_imds()
+ self.assertEqual(1, m_dhcp_ctx.clean_network.call_count)
+ self.assertEqual(1, m_ephemeral_dhcpv4.call_count)
+ self.assertEqual(0, m_media_switch.call_count)
+ self.assertEqual(2, m_request.call_count)
+
+ def test_does_not_poll_imds_report_ready_when_marker_file_exists(
+ self,
+ m_report_ready,
+ m_request,
+ m_media_switch,
+ m_dhcp,
+ m_net,
+ m_fallback,
+ ):
+ """poll_imds should not call report ready when the reported ready
+ marker file exists"""
+ report_file = self.tmp_path("report_marker", self.tmp)
+ write_file(report_file, content="dont run report_ready :)")
+ m_dhcp.return_value = [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
+ ]
+ m_media_switch.return_value = None
+ dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
+ with mock.patch(MOCKPATH + "REPORTED_READY_MARKER_FILE", report_file):
+ dsa._poll_imds()
+ self.assertEqual(m_report_ready.call_count, 0)
+
+ def test_poll_imds_report_ready_success_writes_marker_file(
+ self,
+ m_report_ready,
+ m_request,
+ m_media_switch,
+ m_dhcp,
+ m_net,
+ m_fallback,
+ ):
+ """poll_imds should write the report_ready marker file if
+ reporting ready succeeds"""
+ report_file = self.tmp_path("report_marker", self.tmp)
+ m_dhcp.return_value = [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
+ ]
+ m_media_switch.return_value = None
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ self.assertFalse(os.path.exists(report_file))
+ with mock.patch(MOCKPATH + "REPORTED_READY_MARKER_FILE", report_file):
+ dsa._poll_imds()
+ self.assertEqual(m_report_ready.call_count, 1)
+ self.assertTrue(os.path.exists(report_file))
+
+ def test_poll_imds_report_ready_failure_raises_exc_and_doesnt_write_marker(
+ self,
+ m_report_ready,
+ m_request,
+ m_media_switch,
+ m_dhcp,
+ m_net,
+ m_fallback,
+ ):
+        """poll_imds should raise an exception and not write the
+        report_ready marker file if reporting ready fails."""
+ report_file = self.tmp_path("report_marker", self.tmp)
+ m_dhcp.return_value = [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
+ ]
+ m_media_switch.return_value = None
+ m_report_ready.side_effect = [Exception("fail")]
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ self.assertFalse(os.path.exists(report_file))
+ with mock.patch(MOCKPATH + "REPORTED_READY_MARKER_FILE", report_file):
+ self.assertRaises(InvalidMetaDataException, dsa._poll_imds)
+ self.assertEqual(m_report_ready.call_count, 1)
+ self.assertFalse(os.path.exists(report_file))
+
+
+@mock.patch(MOCKPATH + "DataSourceAzure._report_ready", mock.MagicMock())
+@mock.patch(MOCKPATH + "subp.subp", mock.MagicMock())
+@mock.patch(MOCKPATH + "util.write_file", mock.MagicMock())
+@mock.patch(
+ "cloudinit.sources.helpers.netlink.wait_for_media_disconnect_connect"
+)
+@mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network", autospec=True)
+@mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+@mock.patch("requests.Session.request")
+class TestAzureDataSourcePreprovisioning(CiTestCase):
+ def setUp(self):
+ super(TestAzureDataSourcePreprovisioning, self).setUp()
+ tmp = self.tmp_dir()
+ self.waagent_d = self.tmp_path("/var/lib/waagent", tmp)
+ self.paths = helpers.Paths({"cloud_dir": tmp})
+ dsaz.BUILTIN_DS_CONFIG["data_dir"] = self.waagent_d
+
+ def test_poll_imds_returns_ovf_env(
+ self, m_request, m_dhcp, m_net, m_media_switch
+ ):
+ """The _poll_imds method should return the ovf_env.xml."""
+ m_media_switch.return_value = None
+ m_dhcp.return_value = [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ }
+ ]
+ url = "http://{0}/metadata/reprovisiondata?api-version=2019-06-01"
+ host = "169.254.169.254"
+ full_url = url.format(host)
+ m_request.return_value = mock.MagicMock(
+ status_code=200, text="ovf", content="ovf"
+ )
+ dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
+ self.assertTrue(len(dsa._poll_imds()) > 0)
+ self.assertEqual(
+ m_request.call_args_list,
+ [
+ mock.call(
+ allow_redirects=True,
+ headers={
+ "Metadata": "true",
+ "User-Agent": "Cloud-Init/%s" % vs(),
+ },
+ method="GET",
+ timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS,
+ url=full_url,
+ )
+ ],
+ )
+ self.assertEqual(m_dhcp.call_count, 2)
+ m_net.assert_any_call(
+ broadcast="192.168.2.255",
+ interface="eth9",
+ ip="192.168.2.9",
+ prefix_or_mask="255.255.255.0",
+ router="192.168.2.1",
+ static_routes=None,
+ )
+ self.assertEqual(m_net.call_count, 2)
+
+ def test__reprovision_calls__poll_imds(
+ self, m_request, m_dhcp, m_net, m_media_switch
+ ):
+ """The _reprovision method should call poll IMDS."""
+ m_media_switch.return_value = None
+ m_dhcp.return_value = [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
+ ]
+ url = "http://{0}/metadata/reprovisiondata?api-version=2019-06-01"
+ host = "169.254.169.254"
+ full_url = url.format(host)
+ hostname = "myhost"
+ username = "myuser"
+ odata = {"HostName": hostname, "UserName": username}
+ content = construct_valid_ovf_env(data=odata)
+ m_request.return_value = mock.MagicMock(
+ status_code=200, text=content, content=content
+ )
+ dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
+ md, _ud, cfg, _d = dsa._reprovision()
+ self.assertEqual(md["local-hostname"], hostname)
+ self.assertEqual(cfg["system_info"]["default_user"]["name"], username)
+ self.assertIn(
+ mock.call(
+ allow_redirects=True,
+ headers={
+ "Metadata": "true",
+ "User-Agent": "Cloud-Init/%s" % vs(),
+ },
+ method="GET",
+ timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS,
+ url=full_url,
+ ),
+ m_request.call_args_list,
+ )
+ self.assertEqual(m_dhcp.call_count, 2)
+ m_net.assert_any_call(
+ broadcast="192.168.2.255",
+ interface="eth9",
+ ip="192.168.2.9",
+ prefix_or_mask="255.255.255.0",
+ router="192.168.2.1",
+ static_routes=None,
+ )
+ self.assertEqual(m_net.call_count, 2)
+
+
+class TestRemoveUbuntuNetworkConfigScripts(CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestRemoveUbuntuNetworkConfigScripts, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def test_remove_network_scripts_removes_both_files_and_directories(self):
+ """Any files or directories in paths are removed when present."""
+ file1 = self.tmp_path("file1", dir=self.tmp)
+ subdir = self.tmp_path("sub1", dir=self.tmp)
+ subfile = self.tmp_path("leaf1", dir=subdir)
+ write_file(file1, "file1content")
+ write_file(subfile, "leafcontent")
+ dsaz.maybe_remove_ubuntu_network_config_scripts(paths=[subdir, file1])
+
+ for path in (file1, subdir, subfile):
+ self.assertFalse(
+ os.path.exists(path), "Found unremoved: %s" % path
+ )
+
+ expected_logs = [
+ "INFO: Removing Ubuntu extended network scripts because cloud-init"
+ " updates Azure network configuration on the following events:"
+ " ['boot', 'boot-legacy']",
+ "Recursively deleting %s" % subdir,
+ "Attempting to remove %s" % file1,
+ ]
+ for log in expected_logs:
+ self.assertIn(log, self.logs.getvalue())
+
+ def test_remove_network_scripts_only_attempts_removal_if_path_exists(self):
+ """Any files or directories absent are skipped without error."""
+ dsaz.maybe_remove_ubuntu_network_config_scripts(
+ paths=[
+ self.tmp_path("nodirhere/", dir=self.tmp),
+ self.tmp_path("notfilehere", dir=self.tmp),
+ ]
+ )
+ self.assertNotIn("/not/a", self.logs.getvalue()) # No delete logs
+
+ @mock.patch(MOCKPATH + "os.path.exists")
+ def test_remove_network_scripts_default_removes_stock_scripts(
+ self, m_exists
+ ):
+ """Azure's stock ubuntu image scripts and artifacts are removed."""
+ # Report path absent on all to avoid delete operation
+ m_exists.return_value = False
+ dsaz.maybe_remove_ubuntu_network_config_scripts()
+ calls = m_exists.call_args_list
+ for path in dsaz.UBUNTU_EXTENDED_NETWORK_SCRIPTS:
+ self.assertIn(mock.call(path), calls)
+
+
+class TestWBIsPlatformViable(CiTestCase):
+ """White box tests for _is_platform_viable."""
+
+ with_logs = True
+
+ @mock.patch(MOCKPATH + "dmi.read_dmi_data")
+    def test_true_on_azure_chassis(self, m_read_dmi_data):
+ """Return True if DMI chassis-asset-tag is AZURE_CHASSIS_ASSET_TAG."""
+ m_read_dmi_data.return_value = dsaz.AZURE_CHASSIS_ASSET_TAG
+ self.assertTrue(dsaz._is_platform_viable("doesnotmatter"))
+
+ @mock.patch(MOCKPATH + "os.path.exists")
+ @mock.patch(MOCKPATH + "dmi.read_dmi_data")
+ def test_true_on_azure_ovf_env_in_seed_dir(self, m_read_dmi_data, m_exist):
+ """Return True if ovf-env.xml exists in known seed dirs."""
+ # Non-matching Azure chassis-asset-tag
+ m_read_dmi_data.return_value = dsaz.AZURE_CHASSIS_ASSET_TAG + "X"
+
+ m_exist.return_value = True
+ self.assertTrue(dsaz._is_platform_viable("/some/seed/dir"))
+        # Assumes _is_platform_viable joins "ovf-env.xml" onto the seed dir.
+        m_exist.assert_called_once_with("/some/seed/dir/ovf-env.xml")
+
+ def test_false_on_no_matching_azure_criteria(self):
+ """Report non-azure on unmatched asset tag, ovf-env absent and no dev.
+
+ Return False when the asset tag doesn't match Azure's static
+ AZURE_CHASSIS_ASSET_TAG, no ovf-env.xml files exist in known seed dirs
+ and no devices have a label starting with prefix 'rd_rdfe_'.
+ """
+ self.assertFalse(
+ wrap_and_call(
+ MOCKPATH,
+ {
+ "os.path.exists": False,
+ # Non-matching Azure chassis-asset-tag
+ "dmi.read_dmi_data": dsaz.AZURE_CHASSIS_ASSET_TAG + "X",
+ "subp.which": None,
+ },
+ dsaz._is_platform_viable,
+ "doesnotmatter",
+ )
+ )
+ self.assertIn(
+ "DEBUG: Non-Azure DMI asset tag '{0}' discovered.\n".format(
+ dsaz.AZURE_CHASSIS_ASSET_TAG + "X"
+ ),
+ self.logs.getvalue(),
+ )
+
+
+class TestRandomSeed(CiTestCase):
+ """Test proper handling of random_seed"""
+
+ def test_non_ascii_seed_is_serializable(self):
+ """Pass if a random string from the Azure infrastructure which
+ contains at least one non-Unicode character can be converted to/from
+ JSON without alteration and without throwing an exception.
+ """
+ path = resourceLocation("azure/non_unicode_random_string")
+ result = dsaz._get_random_seed(path)
+
+ obj = {"seed": result}
+ try:
+ serialized = json_dumps(obj)
+ deserialized = load_json(serialized)
+ except UnicodeDecodeError:
+ self.fail("Non-serializable random seed returned")
+
+ self.assertEqual(deserialized["seed"], result)
+
+
+class TestProvisioning:
+ @pytest.fixture(autouse=True)
+ def provisioning_setup(
+ self,
+ azure_ds,
+ mock_azure_get_metadata_from_fabric,
+ mock_azure_report_failure_to_fabric,
+ mock_net_dhcp_maybe_perform_dhcp_discovery,
+ mock_net_dhcp_EphemeralIPv4Network,
+ mock_dmi_read_dmi_data,
+ mock_get_interfaces,
+ mock_get_interface_mac,
+ mock_netlink,
+ mock_os_path_isfile,
+ mock_readurl,
+ mock_subp_subp,
+ mock_util_ensure_dir,
+ mock_util_find_devs_with,
+ mock_util_load_file,
+ mock_util_mount_cb,
+ mock_util_write_file,
+ ):
+ self.azure_ds = azure_ds
+ self.mock_azure_get_metadata_from_fabric = (
+ mock_azure_get_metadata_from_fabric
+ )
+ self.mock_azure_report_failure_to_fabric = (
+ mock_azure_report_failure_to_fabric
+ )
+ self.mock_net_dhcp_maybe_perform_dhcp_discovery = (
+ mock_net_dhcp_maybe_perform_dhcp_discovery
+ )
+ self.mock_net_dhcp_EphemeralIPv4Network = (
+ mock_net_dhcp_EphemeralIPv4Network
+ )
+ self.mock_dmi_read_dmi_data = mock_dmi_read_dmi_data
+ self.mock_get_interfaces = mock_get_interfaces
+ self.mock_get_interface_mac = mock_get_interface_mac
+ self.mock_netlink = mock_netlink
+ self.mock_os_path_isfile = mock_os_path_isfile
+ self.mock_readurl = mock_readurl
+ self.mock_subp_subp = mock_subp_subp
+ self.mock_util_ensure_dir = mock_util_ensure_dir
+ self.mock_util_find_devs_with = mock_util_find_devs_with
+ self.mock_util_load_file = mock_util_load_file
+ self.mock_util_mount_cb = mock_util_mount_cb
+ self.mock_util_write_file = mock_util_write_file
+
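+        # Minimal IMDS instance metadata: no PPS and a single NIC.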
+ self.imds_md = {
+ "extended": {"compute": {"ppsType": "None"}},
+ "network": {
+ "interface": [
+ {
+ "ipv4": {
+ "ipAddress": [
+ {
+ "privateIpAddress": "10.0.0.22",
+ "publicIpAddress": "",
+ }
+ ],
+ "subnet": [
+ {"address": "10.0.0.0", "prefix": "24"}
+ ],
+ },
+ "ipv6": {"ipAddress": []},
+ "macAddress": "011122334455",
+ },
+ ]
+ },
+ }
+
+ def test_no_pps(self):
+ self.mock_readurl.side_effect = [
+ mock.MagicMock(contents=json.dumps(self.imds_md).encode()),
+ ]
+ self.mock_azure_get_metadata_from_fabric.return_value = []
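+        # Marker files probed during discovery (all absent in the no-PPS
+        # case); see the mock_calls assertion below.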
+ self.mock_os_path_isfile.side_effect = [False, False, False]
+
+ self.azure_ds._get_data()
+
+ assert self.mock_os_path_isfile.mock_calls == [
+ mock.call("/var/lib/cloud/data/poll_imds"),
+ mock.call(
+ os.path.join(
+ self.azure_ds.paths.cloud_dir, "seed/azure/ovf-env.xml"
+ )
+ ),
+ mock.call("/var/lib/cloud/data/poll_imds"),
+ ]
+
+ assert self.mock_readurl.mock_calls == [
+ mock.call(
+ "http://169.254.169.254/metadata/instance?"
+ "api-version=2021-08-01&extended=true",
+ timeout=2,
+ headers={"Metadata": "true"},
+ retries=0,
+ exception_cb=dsaz.retry_on_url_exc,
+ infinite=False,
+ ),
+ ]
+
+        # Verify DHCP is set up once.
+ assert self.mock_net_dhcp_maybe_perform_dhcp_discovery.mock_calls == [
+ mock.call(None, dsaz.dhcp_log_cb)
+ ]
+ assert self.azure_ds._wireserver_endpoint == "aa:bb:cc:dd"
+ assert self.azure_ds._is_ephemeral_networking_up() is False
+
+ # Verify DMI usage.
+ assert self.mock_dmi_read_dmi_data.mock_calls == [
+ mock.call("system-uuid")
+ ]
+ assert self.azure_ds.metadata["instance-id"] == "fake-system-uuid"
+
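+    # Each case pairs OVF cfg flags and IMDS metadata with the PPS type
+    # that _determine_pps_type is expected to derive from them.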
+ # Verify IMDS metadata.
+ assert self.azure_ds.metadata["imds"] == self.imds_md
+
+ # Verify reporting ready once.
+ assert self.mock_azure_get_metadata_from_fabric.mock_calls == [
+ mock.call(
+ fallback_lease_file=None,
+ dhcp_opts="aa:bb:cc:dd",
+ iso_dev="/dev/sr0",
+ pubkey_info=None,
+ )
+ ]
+
+ # Verify netlink.
+ assert self.mock_netlink.mock_calls == []
+
+ def test_running_pps(self):
+ self.imds_md["extended"]["compute"]["ppsType"] = "Running"
+ ovf_data = {"HostName": "myhost", "UserName": "myuser"}
+
+ nl_sock = mock.MagicMock()
+ self.mock_netlink.create_bound_netlink_socket.return_value = nl_sock
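+        # readurl responses in order: IMDS instance metadata, the
+        # reprovision (ovf-env.xml) data, then a final IMDS metadata read.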
+ self.mock_readurl.side_effect = [
+ mock.MagicMock(contents=json.dumps(self.imds_md).encode()),
+ mock.MagicMock(
+ contents=construct_valid_ovf_env(data=ovf_data).encode()
+ ),
+ mock.MagicMock(contents=json.dumps(self.imds_md).encode()),
+ ]
+ self.mock_azure_get_metadata_from_fabric.return_value = []
+ self.mock_os_path_isfile.side_effect = [False, False, False, False]
+
+ self.azure_ds._get_data()
+
+ assert self.mock_os_path_isfile.mock_calls == [
+ mock.call("/var/lib/cloud/data/poll_imds"),
+ mock.call(
+ os.path.join(
+ self.azure_ds.paths.cloud_dir, "seed/azure/ovf-env.xml"
+ )
+ ),
+ mock.call("/var/lib/cloud/data/poll_imds"),
+ mock.call("/var/lib/cloud/data/reported_ready"),
+ ]
+
+ assert self.mock_readurl.mock_calls == [
+ mock.call(
+ "http://169.254.169.254/metadata/instance?"
+ "api-version=2021-08-01&extended=true",
+ timeout=2,
+ headers={"Metadata": "true"},
+ retries=0,
+ exception_cb=dsaz.retry_on_url_exc,
+ infinite=False,
+ ),
+ mock.call(
+ "http://169.254.169.254/metadata/reprovisiondata?"
+ "api-version=2019-06-01",
+ timeout=2,
+ headers={"Metadata": "true"},
+ exception_cb=mock.ANY,
+ infinite=True,
+ log_req_resp=False,
+ ),
+ mock.call(
+ "http://169.254.169.254/metadata/instance?"
+ "api-version=2021-08-01&extended=true",
+ timeout=2,
+ headers={"Metadata": "true"},
+ retries=0,
+ exception_cb=dsaz.retry_on_url_exc,
+ infinite=False,
+ ),
+ ]
+
+        # Verify DHCP is set up twice.
+ assert self.mock_net_dhcp_maybe_perform_dhcp_discovery.mock_calls == [
+ mock.call(None, dsaz.dhcp_log_cb),
+ mock.call(None, dsaz.dhcp_log_cb),
+ ]
+ assert self.azure_ds._wireserver_endpoint == "aa:bb:cc:dd"
+ assert self.azure_ds._is_ephemeral_networking_up() is False
+
+ # Verify DMI usage.
+ assert self.mock_dmi_read_dmi_data.mock_calls == [
+ mock.call("system-uuid")
+ ]
+ assert self.azure_ds.metadata["instance-id"] == "fake-system-uuid"
+
+ # Verify IMDS metadata.
+ assert self.azure_ds.metadata["imds"] == self.imds_md
+
+ # Verify reporting ready twice.
+ assert self.mock_azure_get_metadata_from_fabric.mock_calls == [
+ mock.call(
+ fallback_lease_file=None,
+ dhcp_opts="aa:bb:cc:dd",
+ iso_dev="/dev/sr0",
+ pubkey_info=None,
+ ),
+ mock.call(
+ fallback_lease_file=None,
+ dhcp_opts="aa:bb:cc:dd",
+ iso_dev=None,
+ pubkey_info=None,
+ ),
+ ]
+
+ # Verify netlink operations for Running PPS.
+ assert self.mock_netlink.mock_calls == [
+ mock.call.create_bound_netlink_socket(),
+ mock.call.wait_for_media_disconnect_connect(mock.ANY, "ethBoot0"),
+ mock.call.create_bound_netlink_socket().__bool__(),
+ mock.call.create_bound_netlink_socket().close(),
+ ]
+
+ def test_savable_pps(self):
+ self.imds_md["extended"]["compute"]["ppsType"] = "Savable"
+ ovf_data = {"HostName": "myhost", "UserName": "myuser"}
+
+ nl_sock = mock.MagicMock()
+ self.mock_netlink.create_bound_netlink_socket.return_value = nl_sock
+ self.mock_netlink.wait_for_nic_detach_event.return_value = "eth9"
+ self.mock_netlink.wait_for_nic_attach_event.return_value = (
+ "ethAttached1"
+ )
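+        # readurl responses in order: IMDS instance metadata, network
+        # metadata for the hot-attached NIC, the reprovision (ovf-env.xml)
+        # data, then a final IMDS metadata read.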
+ self.mock_readurl.side_effect = [
+ mock.MagicMock(contents=json.dumps(self.imds_md).encode()),
+ mock.MagicMock(
+ contents=json.dumps(self.imds_md["network"]).encode()
+ ),
+ mock.MagicMock(
+ contents=construct_valid_ovf_env(data=ovf_data).encode()
+ ),
+ mock.MagicMock(contents=json.dumps(self.imds_md).encode()),
+ ]
+ self.mock_azure_get_metadata_from_fabric.return_value = []
+ self.mock_os_path_isfile.side_effect = [
+ False, # /var/lib/cloud/data/poll_imds
+ False, # seed/azure/ovf-env.xml
+ False, # /var/lib/cloud/data/poll_imds
+ False, # /var/lib/cloud/data/reported_ready
+ False, # /var/lib/cloud/data/reported_ready
+ False, # /var/lib/cloud/data/nic_detached
+ True, # /var/lib/cloud/data/reported_ready
+ ]
+ self.azure_ds._fallback_interface = False
+
+ self.azure_ds._get_data()
+
+ assert self.mock_os_path_isfile.mock_calls == [
+ mock.call("/var/lib/cloud/data/poll_imds"),
+ mock.call(
+ os.path.join(
+ self.azure_ds.paths.cloud_dir, "seed/azure/ovf-env.xml"
+ )
+ ),
+ mock.call("/var/lib/cloud/data/poll_imds"),
+ mock.call("/var/lib/cloud/data/reported_ready"),
+ mock.call("/var/lib/cloud/data/reported_ready"),
+ mock.call("/var/lib/cloud/data/nic_detached"),
+ mock.call("/var/lib/cloud/data/reported_ready"),
+ ]
+
+ assert self.mock_readurl.mock_calls == [
+ mock.call(
+ "http://169.254.169.254/metadata/instance?"
+ "api-version=2021-08-01&extended=true",
+ timeout=2,
+ headers={"Metadata": "true"},
+ retries=0,
+ exception_cb=dsaz.retry_on_url_exc,
+ infinite=False,
+ ),
+ mock.call(
+ "http://169.254.169.254/metadata/instance/network?"
+ "api-version=2019-06-01",
+ timeout=2,
+ headers={"Metadata": "true"},
+ retries=0,
+ exception_cb=mock.ANY,
+ infinite=True,
+ ),
+ mock.call(
+ "http://169.254.169.254/metadata/reprovisiondata?"
+ "api-version=2019-06-01",
+ timeout=2,
+ headers={"Metadata": "true"},
+ exception_cb=mock.ANY,
+ infinite=True,
+ log_req_resp=False,
+ ),
+ mock.call(
+ "http://169.254.169.254/metadata/instance?"
+ "api-version=2021-08-01&extended=true",
+ timeout=2,
+ headers={"Metadata": "true"},
+ retries=0,
+ exception_cb=dsaz.retry_on_url_exc,
+ infinite=False,
+ ),
+ ]
+
+        # Verify DHCP is set up twice.
+ assert self.mock_net_dhcp_maybe_perform_dhcp_discovery.mock_calls == [
+ mock.call(None, dsaz.dhcp_log_cb),
+ mock.call("ethAttached1", dsaz.dhcp_log_cb),
+ ]
+ assert self.azure_ds._wireserver_endpoint == "aa:bb:cc:dd"
+ assert self.azure_ds._is_ephemeral_networking_up() is False
+
+ # Verify DMI usage.
+ assert self.mock_dmi_read_dmi_data.mock_calls == [
+ mock.call("system-uuid")
+ ]
+ assert self.azure_ds.metadata["instance-id"] == "fake-system-uuid"
+
+ # Verify IMDS metadata.
+ assert self.azure_ds.metadata["imds"] == self.imds_md
+
+ # Verify reporting ready twice.
+ assert self.mock_azure_get_metadata_from_fabric.mock_calls == [
+ mock.call(
+ fallback_lease_file=None,
+ dhcp_opts="aa:bb:cc:dd",
+ iso_dev="/dev/sr0",
+ pubkey_info=None,
+ ),
+ mock.call(
+ fallback_lease_file=None,
+ dhcp_opts="aa:bb:cc:dd",
+ iso_dev=None,
+ pubkey_info=None,
+ ),
+ ]
+
+ # Verify netlink operations for Savable PPS.
+ assert self.mock_netlink.mock_calls == [
+ mock.call.create_bound_netlink_socket(),
+ mock.call.wait_for_nic_detach_event(nl_sock),
+ mock.call.wait_for_nic_attach_event(nl_sock, ["ethAttached1"]),
+ mock.call.create_bound_netlink_socket().__bool__(),
+ mock.call.create_bound_netlink_socket().close(),
+ ]
+
+
+class TestValidateIMDSMetadata:
+ @pytest.mark.parametrize(
+ "mac,expected",
+ [
+ ("001122aabbcc", "00:11:22:aa:bb:cc"),
+ ("001122AABBCC", "00:11:22:aa:bb:cc"),
+ ("00:11:22:aa:bb:cc", "00:11:22:aa:bb:cc"),
+ ("00:11:22:AA:BB:CC", "00:11:22:aa:bb:cc"),
+ ("pass-through-the-unexpected", "pass-through-the-unexpected"),
+ ("", ""),
+ ],
+ )
+ def test_normalize_scenarios(self, mac, expected):
+ normalized = dsaz.normalize_mac_address(mac)
+ assert normalized == expected
+
+ def test_empty(
+ self, azure_ds, caplog, mock_get_interfaces, mock_get_interface_mac
+ ):
+ imds_md = {}
+
+ assert azure_ds.validate_imds_network_metadata(imds_md) is False
+ assert (
+ "cloudinit.sources.DataSourceAzure",
+ 30,
+ "IMDS network metadata has incomplete configuration: None",
+ ) in caplog.record_tuples
+
+ def test_validates_one_nic(
+ self, azure_ds, mock_get_interfaces, mock_get_interface_mac
+ ):
+
+ mock_get_interfaces.return_value = [
+ ("dummy0", "9e:65:d6:19:19:01", None, None),
+ ("test0", "00:11:22:33:44:55", "hv_netvsc", "0x3"),
+ ("lo", "00:00:00:00:00:00", None, None),
+ ]
+ azure_ds._ephemeral_dhcp_ctx = mock.Mock(iface="test0")
+
+ imds_md = {
+ "network": {
+ "interface": [
+ {
+ "ipv4": {
+ "ipAddress": [
+ {
+ "privateIpAddress": "10.0.0.22",
+ "publicIpAddress": "",
+ }
+ ],
+ "subnet": [
+ {"address": "10.0.0.0", "prefix": "24"}
+ ],
+ },
+ "ipv6": {"ipAddress": []},
+ "macAddress": "001122334455",
+ }
+ ]
+ }
+ }
+
+ assert azure_ds.validate_imds_network_metadata(imds_md) is True
+
+ def test_validates_multiple_nic(
+ self, azure_ds, mock_get_interfaces, mock_get_interface_mac
+ ):
+
+ mock_get_interfaces.return_value = [
+ ("dummy0", "9e:65:d6:19:19:01", None, None),
+ ("test0", "00:11:22:33:44:55", "hv_netvsc", "0x3"),
+ ("test1", "01:11:22:33:44:55", "hv_netvsc", "0x3"),
+ ("lo", "00:00:00:00:00:00", None, None),
+ ]
+ azure_ds._ephemeral_dhcp_ctx = mock.Mock(iface="test0")
+
+ imds_md = {
+ "network": {
+ "interface": [
+ {
+ "ipv4": {
+ "ipAddress": [
+ {
+ "privateIpAddress": "10.0.0.22",
+ "publicIpAddress": "",
+ }
+ ],
+ "subnet": [
+ {"address": "10.0.0.0", "prefix": "24"}
+ ],
+ },
+ "ipv6": {"ipAddress": []},
+ "macAddress": "001122334455",
+ },
+ {
+ "ipv4": {
+ "ipAddress": [
+ {
+ "privateIpAddress": "10.0.0.22",
+ "publicIpAddress": "",
+ }
+ ],
+ "subnet": [
+ {"address": "10.0.0.0", "prefix": "24"}
+ ],
+ },
+ "ipv6": {"ipAddress": []},
+ "macAddress": "011122334455",
+ },
+ ]
+ }
+ }
+
+ assert azure_ds.validate_imds_network_metadata(imds_md) is True
+
+ def test_missing_all(
+ self, azure_ds, caplog, mock_get_interfaces, mock_get_interface_mac
+ ):
+
+ mock_get_interfaces.return_value = [
+ ("dummy0", "9e:65:d6:19:19:01", None, None),
+ ("test0", "00:11:22:33:44:55", "hv_netvsc", "0x3"),
+ ("test1", "01:11:22:33:44:55", "hv_netvsc", "0x3"),
+ ("lo", "00:00:00:00:00:00", None, None),
+ ]
+ azure_ds._ephemeral_dhcp_ctx = mock.Mock(iface="test0")
+
+ imds_md = {"network": {"interface": []}}
+
+ assert azure_ds.validate_imds_network_metadata(imds_md) is False
+ assert (
+ "cloudinit.sources.DataSourceAzure",
+ 30,
+ "IMDS network metadata is missing configuration for NICs "
+ "['00:11:22:33:44:55', '01:11:22:33:44:55']: "
+ f"{imds_md['network']!r}",
+ ) in caplog.record_tuples
+
+ def test_missing_primary(
+ self, azure_ds, caplog, mock_get_interfaces, mock_get_interface_mac
+ ):
+
+ mock_get_interfaces.return_value = [
+ ("dummy0", "9e:65:d6:19:19:01", None, None),
+ ("test0", "00:11:22:33:44:55", "hv_netvsc", "0x3"),
+ ("test1", "01:11:22:33:44:55", "hv_netvsc", "0x3"),
+ ("lo", "00:00:00:00:00:00", None, None),
+ ]
+ azure_ds._ephemeral_dhcp_ctx = mock.Mock(iface="test0")
+
+ imds_md = {
+ "network": {
+ "interface": [
+ {
+ "ipv4": {
+ "ipAddress": [
+ {
+ "privateIpAddress": "10.0.0.22",
+ "publicIpAddress": "",
+ }
+ ],
+ "subnet": [
+ {"address": "10.0.0.0", "prefix": "24"}
+ ],
+ },
+ "ipv6": {"ipAddress": []},
+ "macAddress": "011122334455",
+ },
+ ]
+ }
+ }
+
+ assert azure_ds.validate_imds_network_metadata(imds_md) is False
+ assert (
+ "cloudinit.sources.DataSourceAzure",
+ 30,
+ "IMDS network metadata is missing configuration for NICs "
+ f"['00:11:22:33:44:55']: {imds_md['network']!r}",
+ ) in caplog.record_tuples
+ assert (
+ "cloudinit.sources.DataSourceAzure",
+ 30,
+ "IMDS network metadata is missing primary NIC "
+ f"'00:11:22:33:44:55': {imds_md['network']!r}",
+ ) in caplog.record_tuples
+
+ def test_missing_secondary(
+ self, azure_ds, mock_get_interfaces, mock_get_interface_mac
+ ):
+
+ mock_get_interfaces.return_value = [
+ ("dummy0", "9e:65:d6:19:19:01", None, None),
+ ("test0", "00:11:22:33:44:55", "hv_netvsc", "0x3"),
+ ("test1", "01:11:22:33:44:55", "hv_netvsc", "0x3"),
+ ("lo", "00:00:00:00:00:00", None, None),
+ ]
+ azure_ds._ephemeral_dhcp_ctx = mock.Mock(iface="test0")
+
+ imds_md = {
+ "network": {
+ "interface": [
+ {
+ "ipv4": {
+ "ipAddress": [
+ {
+ "privateIpAddress": "10.0.0.22",
+ "publicIpAddress": "",
+ }
+ ],
+ "subnet": [
+ {"address": "10.0.0.0", "prefix": "24"}
+ ],
+ },
+ "ipv6": {"ipAddress": []},
+ "macAddress": "001122334455",
+ },
+ ]
+ }
+ }
+
+ assert azure_ds.validate_imds_network_metadata(imds_md) is False
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/sources/test_azure_helper.py
index b8899807..98143bc3 100644
--- a/tests/unittests/test_datasource/test_azure_helper.py
+++ b/tests/unittests/sources/test_azure_helper.py
@@ -9,10 +9,9 @@ from xml.etree import ElementTree
from xml.sax.saxutils import escape, unescape
from cloudinit.sources.helpers import azure as azure_helper
-from cloudinit.tests.helpers import CiTestCase, ExitStack, mock, populate_dir
-
-from cloudinit.util import load_file
from cloudinit.sources.helpers.azure import WALinuxAgentShim as wa_shim
+from cloudinit.util import load_file
+from tests.unittests.helpers import CiTestCase, ExitStack, mock, populate_dir
GOAL_STATE_TEMPLATE = """\
<?xml version="1.0" encoding="utf-8"?>
@@ -52,7 +51,7 @@ GOAL_STATE_TEMPLATE = """\
</GoalState>
"""
-HEALTH_REPORT_XML_TEMPLATE = '''\
+HEALTH_REPORT_XML_TEMPLATE = """\
<?xml version="1.0" encoding="utf-8"?>
<Health xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/2001/XMLSchema">
@@ -70,14 +69,16 @@ HEALTH_REPORT_XML_TEMPLATE = '''\
</RoleInstanceList>
</Container>
</Health>
-'''
+"""
-HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE = dedent('''\
+HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE = dedent(
+ """\
<Details>
<SubStatus>{health_substatus}</SubStatus>
<Description>{health_description}</Description>
</Details>
- ''')
+ """
+)
HEALTH_REPORT_DESCRIPTION_TRIM_LEN = 512
@@ -87,24 +88,27 @@ class SentinelException(Exception):
class TestFindEndpoint(CiTestCase):
-
def setUp(self):
super(TestFindEndpoint, self).setUp()
patches = ExitStack()
self.addCleanup(patches.close)
self.load_file = patches.enter_context(
- mock.patch.object(azure_helper.util, 'load_file'))
+ mock.patch.object(azure_helper.util, "load_file")
+ )
self.dhcp_options = patches.enter_context(
- mock.patch.object(wa_shim, '_load_dhclient_json'))
+ mock.patch.object(wa_shim, "_load_dhclient_json")
+ )
self.networkd_leases = patches.enter_context(
- mock.patch.object(wa_shim, '_networkd_get_value_from_leases'))
+ mock.patch.object(wa_shim, "_networkd_get_value_from_leases")
+ )
self.networkd_leases.return_value = None
def test_missing_file(self):
- """wa_shim find_endpoint uses default endpoint if leasefile not found
+ """wa_shim find_endpoint uses default endpoint if
+ leasefile not found
"""
self.assertEqual(wa_shim.find_endpoint(), "168.63.129.16")
@@ -112,82 +116,93 @@ class TestFindEndpoint(CiTestCase):
"""wa_shim find_endpoint uses default endpoint if leasefile is found
but does not contain DHCP Option 245 (whose value is the endpoint)
"""
- self.load_file.return_value = ''
- self.dhcp_options.return_value = {'eth0': {'key': 'value'}}
+ self.load_file.return_value = ""
+ self.dhcp_options.return_value = {"eth0": {"key": "value"}}
self.assertEqual(wa_shim.find_endpoint(), "168.63.129.16")
@staticmethod
def _build_lease_content(encoded_address):
endpoint = azure_helper._get_dhcp_endpoint_option_name()
- return '\n'.join([
- 'lease {',
- ' interface "eth0";',
- ' option {0} {1};'.format(endpoint, encoded_address),
- '}'])
+ return "\n".join(
+ [
+ "lease {",
+ ' interface "eth0";',
+ " option {0} {1};".format(endpoint, encoded_address),
+ "}",
+ ]
+ )
def test_from_dhcp_client(self):
self.dhcp_options.return_value = {"eth0": {"unknown_245": "5:4:3:2"}}
- self.assertEqual('5.4.3.2', wa_shim.find_endpoint(None))
-
- @mock.patch('cloudinit.sources.helpers.azure.util.is_FreeBSD')
- def test_latest_lease_used(self, m_is_freebsd):
- m_is_freebsd.return_value = False # To avoid hitting load_file
- encoded_addresses = ['5:4:3:2', '4:3:2:1']
- file_content = '\n'.join([self._build_lease_content(encoded_address)
- for encoded_address in encoded_addresses])
+ self.assertEqual("5.4.3.2", wa_shim.find_endpoint(None))
+
+ def test_latest_lease_used(self):
+ encoded_addresses = ["5:4:3:2", "4:3:2:1"]
+ file_content = "\n".join(
+ [
+ self._build_lease_content(encoded_address)
+ for encoded_address in encoded_addresses
+ ]
+ )
self.load_file.return_value = file_content
- self.assertEqual(encoded_addresses[-1].replace(':', '.'),
- wa_shim.find_endpoint("foobar"))
+ self.assertEqual(
+ encoded_addresses[-1].replace(":", "."),
+ wa_shim.find_endpoint("foobar"),
+ )
class TestExtractIpAddressFromLeaseValue(CiTestCase):
-
def test_hex_string(self):
- ip_address, encoded_address = '98.76.54.32', '62:4c:36:20'
+ ip_address, encoded_address = "98.76.54.32", "62:4c:36:20"
self.assertEqual(
- ip_address, wa_shim.get_ip_from_lease_value(encoded_address))
+ ip_address, wa_shim.get_ip_from_lease_value(encoded_address)
+ )
def test_hex_string_with_single_character_part(self):
- ip_address, encoded_address = '4.3.2.1', '4:3:2:1'
+ ip_address, encoded_address = "4.3.2.1", "4:3:2:1"
self.assertEqual(
- ip_address, wa_shim.get_ip_from_lease_value(encoded_address))
+ ip_address, wa_shim.get_ip_from_lease_value(encoded_address)
+ )
def test_packed_string(self):
- ip_address, encoded_address = '98.76.54.32', 'bL6 '
+ ip_address, encoded_address = "98.76.54.32", "bL6 "
self.assertEqual(
- ip_address, wa_shim.get_ip_from_lease_value(encoded_address))
+ ip_address, wa_shim.get_ip_from_lease_value(encoded_address)
+ )
def test_packed_string_with_escaped_quote(self):
- ip_address, encoded_address = '100.72.34.108', 'dH\\"l'
+ ip_address, encoded_address = "100.72.34.108", 'dH\\"l'
self.assertEqual(
- ip_address, wa_shim.get_ip_from_lease_value(encoded_address))
+ ip_address, wa_shim.get_ip_from_lease_value(encoded_address)
+ )
def test_packed_string_containing_a_colon(self):
- ip_address, encoded_address = '100.72.58.108', 'dH:l'
+ ip_address, encoded_address = "100.72.58.108", "dH:l"
self.assertEqual(
- ip_address, wa_shim.get_ip_from_lease_value(encoded_address))
+ ip_address, wa_shim.get_ip_from_lease_value(encoded_address)
+ )
class TestGoalStateParsing(CiTestCase):
default_parameters = {
- 'incarnation': 1,
- 'container_id': 'MyContainerId',
- 'instance_id': 'MyInstanceId',
- 'certificates_url': 'MyCertificatesUrl',
+ "incarnation": 1,
+ "container_id": "MyContainerId",
+ "instance_id": "MyInstanceId",
+ "certificates_url": "MyCertificatesUrl",
}
def _get_formatted_goal_state_xml_string(self, **kwargs):
parameters = self.default_parameters.copy()
parameters.update(kwargs)
xml = GOAL_STATE_TEMPLATE.format(**parameters)
- if parameters['certificates_url'] is None:
+ if parameters["certificates_url"] is None:
new_xml_lines = []
for line in xml.splitlines():
- if 'Certificates' in line:
+ if "Certificates" in line:
continue
new_xml_lines.append(line)
- xml = '\n'.join(new_xml_lines)
+ xml = "\n".join(new_xml_lines)
return xml
def _get_goal_state(self, m_azure_endpoint_client=None, **kwargs):
@@ -197,17 +212,17 @@ class TestGoalStateParsing(CiTestCase):
return azure_helper.GoalState(xml, m_azure_endpoint_client)
def test_incarnation_parsed_correctly(self):
- incarnation = '123'
+ incarnation = "123"
goal_state = self._get_goal_state(incarnation=incarnation)
self.assertEqual(incarnation, goal_state.incarnation)
def test_container_id_parsed_correctly(self):
- container_id = 'TestContainerId'
+ container_id = "TestContainerId"
goal_state = self._get_goal_state(container_id=container_id)
self.assertEqual(container_id, goal_state.container_id)
def test_instance_id_parsed_correctly(self):
- instance_id = 'TestInstanceId'
+ instance_id = "TestInstanceId"
goal_state = self._get_goal_state(instance_id=instance_id)
self.assertEqual(instance_id, goal_state.instance_id)
@@ -216,67 +231,72 @@ class TestGoalStateParsing(CiTestCase):
previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
current_iid = "544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8"
self.assertTrue(
- azure_helper.is_byte_swapped(previous_iid, current_iid))
+ azure_helper.is_byte_swapped(previous_iid, current_iid)
+ )
def test_instance_id_no_byte_swap_same_instance_id(self):
previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
current_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
self.assertFalse(
- azure_helper.is_byte_swapped(previous_iid, current_iid))
+ azure_helper.is_byte_swapped(previous_iid, current_iid)
+ )
def test_instance_id_no_byte_swap_diff_instance_id(self):
previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
current_iid = "G0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
self.assertFalse(
- azure_helper.is_byte_swapped(previous_iid, current_iid))
+ azure_helper.is_byte_swapped(previous_iid, current_iid)
+ )
def test_certificates_xml_parsed_and_fetched_correctly(self):
m_azure_endpoint_client = mock.MagicMock()
- certificates_url = 'TestCertificatesUrl'
+ certificates_url = "TestCertificatesUrl"
goal_state = self._get_goal_state(
m_azure_endpoint_client=m_azure_endpoint_client,
- certificates_url=certificates_url)
+ certificates_url=certificates_url,
+ )
certificates_xml = goal_state.certificates_xml
self.assertEqual(1, m_azure_endpoint_client.get.call_count)
self.assertEqual(
- certificates_url,
- m_azure_endpoint_client.get.call_args[0][0])
+ certificates_url, m_azure_endpoint_client.get.call_args[0][0]
+ )
self.assertTrue(
- m_azure_endpoint_client.get.call_args[1].get(
- 'secure', False))
+ m_azure_endpoint_client.get.call_args[1].get("secure", False)
+ )
self.assertEqual(
- m_azure_endpoint_client.get.return_value.contents,
- certificates_xml)
+ m_azure_endpoint_client.get.return_value.contents, certificates_xml
+ )
def test_missing_certificates_skips_http_get(self):
m_azure_endpoint_client = mock.MagicMock()
goal_state = self._get_goal_state(
m_azure_endpoint_client=m_azure_endpoint_client,
- certificates_url=None)
+ certificates_url=None,
+ )
certificates_xml = goal_state.certificates_xml
self.assertEqual(0, m_azure_endpoint_client.get.call_count)
self.assertIsNone(certificates_xml)
def test_invalid_goal_state_xml_raises_parse_error(self):
- xml = 'random non-xml data'
+ xml = "random non-xml data"
with self.assertRaises(ElementTree.ParseError):
azure_helper.GoalState(xml, mock.MagicMock())
def test_missing_container_id_in_goal_state_xml_raises_exc(self):
xml = self._get_formatted_goal_state_xml_string()
- xml = re.sub('<ContainerId>.*</ContainerId>', '', xml)
+ xml = re.sub("<ContainerId>.*</ContainerId>", "", xml)
with self.assertRaises(azure_helper.InvalidGoalStateXMLException):
azure_helper.GoalState(xml, mock.MagicMock())
def test_missing_instance_id_in_goal_state_xml_raises_exc(self):
xml = self._get_formatted_goal_state_xml_string()
- xml = re.sub('<InstanceId>.*</InstanceId>', '', xml)
+ xml = re.sub("<InstanceId>.*</InstanceId>", "", xml)
with self.assertRaises(azure_helper.InvalidGoalStateXMLException):
azure_helper.GoalState(xml, mock.MagicMock())
def test_missing_incarnation_in_goal_state_xml_raises_exc(self):
xml = self._get_formatted_goal_state_xml_string()
- xml = re.sub('<Incarnation>.*</Incarnation>', '', xml)
+ xml = re.sub("<Incarnation>.*</Incarnation>", "", xml)
with self.assertRaises(azure_helper.InvalidGoalStateXMLException):
azure_helper.GoalState(xml, mock.MagicMock())
@@ -284,8 +304,8 @@ class TestGoalStateParsing(CiTestCase):
class TestAzureEndpointHttpClient(CiTestCase):
regular_headers = {
- 'x-ms-agent-name': 'WALinuxAgent',
- 'x-ms-version': '2012-11-30',
+ "x-ms-agent-name": "WALinuxAgent",
+ "x-ms-version": "2012-11-30",
}
def setUp(self):
@@ -293,43 +313,48 @@ class TestAzureEndpointHttpClient(CiTestCase):
patches = ExitStack()
self.addCleanup(patches.close)
self.m_http_with_retries = patches.enter_context(
- mock.patch.object(azure_helper, 'http_with_retries'))
+ mock.patch.object(azure_helper, "http_with_retries")
+ )
def test_non_secure_get(self):
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
- url = 'MyTestUrl'
+ url = "MyTestUrl"
response = client.get(url, secure=False)
self.assertEqual(1, self.m_http_with_retries.call_count)
self.assertEqual(self.m_http_with_retries.return_value, response)
self.assertEqual(
mock.call(url, headers=self.regular_headers),
- self.m_http_with_retries.call_args)
+ self.m_http_with_retries.call_args,
+ )
def test_non_secure_get_raises_exception(self):
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
- url = 'MyTestUrl'
+ url = "MyTestUrl"
self.m_http_with_retries.side_effect = SentinelException
self.assertRaises(SentinelException, client.get, url, secure=False)
self.assertEqual(1, self.m_http_with_retries.call_count)
def test_secure_get(self):
- url = 'MyTestUrl'
+ url = "MyTestUrl"
m_certificate = mock.MagicMock()
expected_headers = self.regular_headers.copy()
- expected_headers.update({
- "x-ms-cipher-name": "DES_EDE3_CBC",
- "x-ms-guest-agent-public-x509-cert": m_certificate,
- })
+ expected_headers.update(
+ {
+ "x-ms-cipher-name": "DES_EDE3_CBC",
+ "x-ms-guest-agent-public-x509-cert": m_certificate,
+ }
+ )
client = azure_helper.AzureEndpointHttpClient(m_certificate)
response = client.get(url, secure=True)
self.assertEqual(1, self.m_http_with_retries.call_count)
self.assertEqual(self.m_http_with_retries.return_value, response)
self.assertEqual(
mock.call(url, headers=expected_headers),
- self.m_http_with_retries.call_args)
+ self.m_http_with_retries.call_args,
+ )
def test_secure_get_raises_exception(self):
- url = 'MyTestUrl'
+ url = "MyTestUrl"
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
self.m_http_with_retries.side_effect = SentinelException
self.assertRaises(SentinelException, client.get, url, secure=True)
@@ -337,44 +362,50 @@ class TestAzureEndpointHttpClient(CiTestCase):
def test_post(self):
m_data = mock.MagicMock()
- url = 'MyTestUrl'
+ url = "MyTestUrl"
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
response = client.post(url, data=m_data)
self.assertEqual(1, self.m_http_with_retries.call_count)
self.assertEqual(self.m_http_with_retries.return_value, response)
self.assertEqual(
mock.call(url, data=m_data, headers=self.regular_headers),
- self.m_http_with_retries.call_args)
+ self.m_http_with_retries.call_args,
+ )
def test_post_raises_exception(self):
m_data = mock.MagicMock()
- url = 'MyTestUrl'
+ url = "MyTestUrl"
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
self.m_http_with_retries.side_effect = SentinelException
self.assertRaises(SentinelException, client.post, url, data=m_data)
self.assertEqual(1, self.m_http_with_retries.call_count)
def test_post_with_extra_headers(self):
- url = 'MyTestUrl'
+ url = "MyTestUrl"
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
- extra_headers = {'test': 'header'}
+ extra_headers = {"test": "header"}
client.post(url, extra_headers=extra_headers)
expected_headers = self.regular_headers.copy()
expected_headers.update(extra_headers)
self.assertEqual(1, self.m_http_with_retries.call_count)
self.assertEqual(
mock.call(url, data=mock.ANY, headers=expected_headers),
- self.m_http_with_retries.call_args)
+ self.m_http_with_retries.call_args,
+ )
def test_post_with_sleep_with_extra_headers_raises_exception(self):
m_data = mock.MagicMock()
- url = 'MyTestUrl'
- extra_headers = {'test': 'header'}
+ url = "MyTestUrl"
+ extra_headers = {"test": "header"}
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
self.m_http_with_retries.side_effect = SentinelException
self.assertRaises(
- SentinelException, client.post,
- url, data=m_data, extra_headers=extra_headers)
+ SentinelException,
+ client.post,
+ url,
+ data=m_data,
+ extra_headers=extra_headers,
+ )
self.assertEqual(1, self.m_http_with_retries.call_count)
@@ -384,6 +415,7 @@ class TestAzureHelperHttpWithRetries(CiTestCase):
max_readurl_attempts = 240
default_readurl_timeout = 5
+ sleep_duration_between_retries = 5
periodic_logging_attempts = 12
def setUp(self):
@@ -393,122 +425,139 @@ class TestAzureHelperHttpWithRetries(CiTestCase):
self.m_readurl = patches.enter_context(
mock.patch.object(
- azure_helper.url_helper, 'readurl', mock.MagicMock()))
- patches.enter_context(
- mock.patch.object(azure_helper.time, 'sleep', mock.MagicMock()))
+ azure_helper.url_helper, "readurl", mock.MagicMock()
+ )
+ )
+ self.m_sleep = patches.enter_context(
+ mock.patch.object(azure_helper.time, "sleep", autospec=True)
+ )
def test_http_with_retries(self):
- self.m_readurl.return_value = 'TestResp'
+ self.m_readurl.return_value = "TestResp"
self.assertEqual(
- azure_helper.http_with_retries('testurl'),
- self.m_readurl.return_value)
+ azure_helper.http_with_retries("testurl"),
+ self.m_readurl.return_value,
+ )
self.assertEqual(self.m_readurl.call_count, 1)
- def test_http_with_retries_propagates_readurl_exc_and_logs_exc(
- self):
+ def test_http_with_retries_propagates_readurl_exc_and_logs_exc(self):
self.m_readurl.side_effect = SentinelException
self.assertRaises(
- SentinelException, azure_helper.http_with_retries, 'testurl')
+ SentinelException, azure_helper.http_with_retries, "testurl"
+ )
self.assertEqual(self.m_readurl.call_count, self.max_readurl_attempts)
self.assertIsNotNone(
re.search(
- r'Failed HTTP request with Azure endpoint \S* during '
- r'attempt \d+ with exception: \S*',
- self.logs.getvalue()))
+ r"Failed HTTP request with Azure endpoint \S* during "
+ r"attempt \d+ with exception: \S*",
+ self.logs.getvalue(),
+ )
+ )
self.assertIsNone(
re.search(
- r'Successful HTTP request with Azure endpoint \S* after '
- r'\d+ attempts',
- self.logs.getvalue()))
+ r"Successful HTTP request with Azure endpoint \S* after "
+ r"\d+ attempts",
+ self.logs.getvalue(),
+ )
+ )
def test_http_with_retries_delayed_success_due_to_temporary_readurl_exc(
- self):
- self.m_readurl.side_effect = \
- [SentinelException] * self.periodic_logging_attempts + \
- ['TestResp']
- self.m_readurl.return_value = 'TestResp'
-
- response = azure_helper.http_with_retries('testurl')
+ self,
+ ):
+ self.m_readurl.side_effect = [
+ SentinelException
+ ] * self.periodic_logging_attempts + ["TestResp"]
+ self.m_readurl.return_value = "TestResp"
+
+ response = azure_helper.http_with_retries("testurl")
+ self.assertEqual(response, self.m_readurl.return_value)
self.assertEqual(
- response,
- self.m_readurl.return_value)
+ self.m_readurl.call_count, self.periodic_logging_attempts + 1
+ )
+
+ # Ensure that cloud-init did sleep between each failed request
self.assertEqual(
- self.m_readurl.call_count,
- self.periodic_logging_attempts + 1)
+ self.m_sleep.call_count, self.periodic_logging_attempts
+ )
+ self.m_sleep.assert_called_with(self.sleep_duration_between_retries)
def test_http_with_retries_long_delay_logs_periodic_failure_msg(self):
- self.m_readurl.side_effect = \
- [SentinelException] * self.periodic_logging_attempts + \
- ['TestResp']
- self.m_readurl.return_value = 'TestResp'
+ self.m_readurl.side_effect = [
+ SentinelException
+ ] * self.periodic_logging_attempts + ["TestResp"]
+ self.m_readurl.return_value = "TestResp"
- azure_helper.http_with_retries('testurl')
+ azure_helper.http_with_retries("testurl")
self.assertEqual(
- self.m_readurl.call_count,
- self.periodic_logging_attempts + 1)
+ self.m_readurl.call_count, self.periodic_logging_attempts + 1
+ )
self.assertIsNotNone(
re.search(
- r'Failed HTTP request with Azure endpoint \S* during '
- r'attempt \d+ with exception: \S*',
- self.logs.getvalue()))
+ r"Failed HTTP request with Azure endpoint \S* during "
+ r"attempt \d+ with exception: \S*",
+ self.logs.getvalue(),
+ )
+ )
self.assertIsNotNone(
re.search(
- r'Successful HTTP request with Azure endpoint \S* after '
- r'\d+ attempts',
- self.logs.getvalue()))
+ r"Successful HTTP request with Azure endpoint \S* after "
+ r"\d+ attempts",
+ self.logs.getvalue(),
+ )
+ )
def test_http_with_retries_short_delay_does_not_log_periodic_failure_msg(
- self):
- self.m_readurl.side_effect = \
- [SentinelException] * \
- (self.periodic_logging_attempts - 1) + \
- ['TestResp']
- self.m_readurl.return_value = 'TestResp'
-
- azure_helper.http_with_retries('testurl')
+ self,
+ ):
+ self.m_readurl.side_effect = [SentinelException] * (
+ self.periodic_logging_attempts - 1
+ ) + ["TestResp"]
+ self.m_readurl.return_value = "TestResp"
+
+ azure_helper.http_with_retries("testurl")
self.assertEqual(
- self.m_readurl.call_count,
- self.periodic_logging_attempts)
+ self.m_readurl.call_count, self.periodic_logging_attempts
+ )
self.assertIsNone(
re.search(
- r'Failed HTTP request with Azure endpoint \S* during '
- r'attempt \d+ with exception: \S*',
- self.logs.getvalue()))
+ r"Failed HTTP request with Azure endpoint \S* during "
+ r"attempt \d+ with exception: \S*",
+ self.logs.getvalue(),
+ )
+ )
self.assertIsNotNone(
re.search(
- r'Successful HTTP request with Azure endpoint \S* after '
- r'\d+ attempts',
- self.logs.getvalue()))
+ r"Successful HTTP request with Azure endpoint \S* after "
+ r"\d+ attempts",
+ self.logs.getvalue(),
+ )
+ )
def test_http_with_retries_calls_url_helper_readurl_with_args_kwargs(self):
testurl = mock.MagicMock()
kwargs = {
- 'headers': mock.MagicMock(),
- 'data': mock.MagicMock(),
+ "headers": mock.MagicMock(),
+ "data": mock.MagicMock(),
# timeout kwarg should not be modified or deleted if present
- 'timeout': mock.MagicMock()
+ "timeout": mock.MagicMock(),
}
azure_helper.http_with_retries(testurl, **kwargs)
self.m_readurl.assert_called_once_with(testurl, **kwargs)
def test_http_with_retries_adds_timeout_kwarg_if_not_present(self):
testurl = mock.MagicMock()
- kwargs = {
- 'headers': mock.MagicMock(),
- 'data': mock.MagicMock()
- }
+ kwargs = {"headers": mock.MagicMock(), "data": mock.MagicMock()}
expected_kwargs = copy.deepcopy(kwargs)
- expected_kwargs['timeout'] = self.default_readurl_timeout
+ expected_kwargs["timeout"] = self.default_readurl_timeout
azure_helper.http_with_retries(testurl, **kwargs)
self.m_readurl.assert_called_once_with(testurl, **expected_kwargs)
- def test_http_with_retries_deletes_retries_kwargs_passed_in(
- self):
+ def test_http_with_retries_deletes_retries_kwargs_passed_in(self):
"""http_with_retries already implements retry logic,
so url_helper.readurl should not have retries.
http_with_retries should delete kwargs that
@@ -516,44 +565,44 @@ class TestAzureHelperHttpWithRetries(CiTestCase):
"""
testurl = mock.MagicMock()
kwargs = {
- 'headers': mock.MagicMock(),
- 'data': mock.MagicMock(),
- 'timeout': mock.MagicMock(),
- 'retries': mock.MagicMock(),
- 'infinite': mock.MagicMock()
+ "headers": mock.MagicMock(),
+ "data": mock.MagicMock(),
+ "timeout": mock.MagicMock(),
+ "retries": mock.MagicMock(),
+ "infinite": mock.MagicMock(),
}
expected_kwargs = copy.deepcopy(kwargs)
- expected_kwargs.pop('retries', None)
- expected_kwargs.pop('infinite', None)
+ expected_kwargs.pop("retries", None)
+ expected_kwargs.pop("infinite", None)
azure_helper.http_with_retries(testurl, **kwargs)
self.m_readurl.assert_called_once_with(testurl, **expected_kwargs)
self.assertIn(
- 'retries kwarg passed in for communication with Azure endpoint.',
- self.logs.getvalue())
+ "retries kwarg passed in for communication with Azure endpoint.",
+ self.logs.getvalue(),
+ )
self.assertIn(
- 'infinite kwarg passed in for communication with Azure endpoint.',
- self.logs.getvalue())
+ "infinite kwarg passed in for communication with Azure endpoint.",
+ self.logs.getvalue(),
+ )

class TestOpenSSLManager(CiTestCase):
-
def setUp(self):
super(TestOpenSSLManager, self).setUp()
patches = ExitStack()
self.addCleanup(patches.close)
self.subp = patches.enter_context(
- mock.patch.object(azure_helper.subp, 'subp'))
+ mock.patch.object(azure_helper.subp, "subp")
+ )
try:
- self.open = patches.enter_context(
- mock.patch('__builtin__.open'))
+ self.open = patches.enter_context(mock.patch("__builtin__.open"))
except ImportError:
- self.open = patches.enter_context(
- mock.patch('builtins.open'))
+ self.open = patches.enter_context(mock.patch("builtins.open"))
- @mock.patch.object(azure_helper, 'cd', mock.MagicMock())
- @mock.patch.object(azure_helper.temp_utils, 'mkdtemp')
+ @mock.patch.object(azure_helper, "cd", mock.MagicMock())
+ @mock.patch.object(azure_helper.temp_utils, "mkdtemp")
def test_openssl_manager_creates_a_tmpdir(self, mkdtemp):
manager = azure_helper.OpenSSLManager()
self.assertEqual(mkdtemp.return_value, manager.tmpdir)
@@ -562,16 +611,16 @@ class TestOpenSSLManager(CiTestCase):
subp_directory = {}
def capture_directory(*args, **kwargs):
- subp_directory['path'] = os.getcwd()
+ subp_directory["path"] = os.getcwd()
self.subp.side_effect = capture_directory
manager = azure_helper.OpenSSLManager()
- self.assertEqual(manager.tmpdir, subp_directory['path'])
+ self.assertEqual(manager.tmpdir, subp_directory["path"])
manager.clean_up()
- @mock.patch.object(azure_helper, 'cd', mock.MagicMock())
- @mock.patch.object(azure_helper.temp_utils, 'mkdtemp', mock.MagicMock())
- @mock.patch.object(azure_helper.util, 'del_dir')
+ @mock.patch.object(azure_helper, "cd", mock.MagicMock())
+ @mock.patch.object(azure_helper.temp_utils, "mkdtemp", mock.MagicMock())
+ @mock.patch.object(azure_helper.util, "del_dir")
def test_clean_up(self, del_dir):
manager = azure_helper.OpenSSLManager()
manager.clean_up()
@@ -579,43 +628,42 @@ class TestOpenSSLManager(CiTestCase):
class TestOpenSSLManagerActions(CiTestCase):
-
def setUp(self):
super(TestOpenSSLManagerActions, self).setUp()
self.allowed_subp = True
def _data_file(self, name):
- path = 'tests/data/azure'
+ path = "tests/data/azure"
return os.path.join(path, name)
@unittest.skip("todo move to cloud_test")
def test_pubkey_extract(self):
- cert = load_file(self._data_file('pubkey_extract_cert'))
- good_key = load_file(self._data_file('pubkey_extract_ssh_key'))
+ cert = load_file(self._data_file("pubkey_extract_cert"))
+ good_key = load_file(self._data_file("pubkey_extract_ssh_key"))
sslmgr = azure_helper.OpenSSLManager()
key = sslmgr._get_ssh_key_from_cert(cert)
self.assertEqual(good_key, key)
- good_fingerprint = '073E19D14D1C799224C6A0FD8DDAB6A8BF27D473'
+ good_fingerprint = "073E19D14D1C799224C6A0FD8DDAB6A8BF27D473"
fingerprint = sslmgr._get_fingerprint_from_cert(cert)
self.assertEqual(good_fingerprint, fingerprint)
@unittest.skip("todo move to cloud_test")
- @mock.patch.object(azure_helper.OpenSSLManager, '_decrypt_certs_from_xml')
+ @mock.patch.object(azure_helper.OpenSSLManager, "_decrypt_certs_from_xml")
def test_parse_certificates(self, mock_decrypt_certs):
"""Azure control plane puts private keys as well as certificates
- into the Certificates XML object. Make sure only the public keys
- from certs are extracted and that fingerprints are converted to
- the form specified in the ovf-env.xml file.
+ into the Certificates XML object. Make sure only the public keys
+ from certs are extracted and that fingerprints are converted to
+ the form specified in the ovf-env.xml file.
"""
- cert_contents = load_file(self._data_file('parse_certificates_pem'))
- fingerprints = load_file(self._data_file(
- 'parse_certificates_fingerprints')
+ cert_contents = load_file(self._data_file("parse_certificates_pem"))
+ fingerprints = load_file(
+ self._data_file("parse_certificates_fingerprints")
).splitlines()
mock_decrypt_certs.return_value = cert_contents
sslmgr = azure_helper.OpenSSLManager()
- keys_by_fp = sslmgr.parse_certificates('')
+ keys_by_fp = sslmgr.parse_certificates("")
for fp in keys_by_fp.keys():
self.assertIn(fp, fingerprints)
for fp in fingerprints:
@@ -627,21 +675,23 @@ class TestGoalStateHealthReporter(CiTestCase):
maxDiff = None
default_parameters = {
- 'incarnation': 1634,
- 'container_id': 'MyContainerId',
- 'instance_id': 'MyInstanceId'
+ "incarnation": 1634,
+ "container_id": "MyContainerId",
+ "instance_id": "MyInstanceId",
}
- test_azure_endpoint = 'TestEndpoint'
- test_health_report_url = 'http://{0}/machine?comp=health'.format(
- test_azure_endpoint)
- test_default_headers = {'Content-Type': 'text/xml; charset=utf-8'}
+ test_azure_endpoint = "TestEndpoint"
+ test_health_report_url = "http://{0}/machine?comp=health".format(
+ test_azure_endpoint
+ )
+ test_default_headers = {"Content-Type": "text/xml; charset=utf-8"}
- provisioning_success_status = 'Ready'
- provisioning_not_ready_status = 'NotReady'
- provisioning_failure_substatus = 'ProvisioningFailed'
+ provisioning_success_status = "Ready"
+ provisioning_not_ready_status = "NotReady"
+ provisioning_failure_substatus = "ProvisioningFailed"
provisioning_failure_err_description = (
- 'Test error message containing provisioning failure details')
+ "Test error message containing provisioning failure details"
+ )
def setUp(self):
super(TestGoalStateHealthReporter, self).setUp()
@@ -649,22 +699,28 @@ class TestGoalStateHealthReporter(CiTestCase):
self.addCleanup(patches.close)
patches.enter_context(
- mock.patch.object(azure_helper.time, 'sleep', mock.MagicMock()))
+ mock.patch.object(azure_helper.time, "sleep", mock.MagicMock())
+ )
self.read_file_or_url = patches.enter_context(
- mock.patch.object(azure_helper.url_helper, 'read_file_or_url'))
+ mock.patch.object(azure_helper.url_helper, "read_file_or_url")
+ )
self.post = patches.enter_context(
- mock.patch.object(azure_helper.AzureEndpointHttpClient,
- 'post'))
+ mock.patch.object(azure_helper.AzureEndpointHttpClient, "post")
+ )
self.GoalState = patches.enter_context(
- mock.patch.object(azure_helper, 'GoalState'))
- self.GoalState.return_value.container_id = \
- self.default_parameters['container_id']
- self.GoalState.return_value.instance_id = \
- self.default_parameters['instance_id']
- self.GoalState.return_value.incarnation = \
- self.default_parameters['incarnation']
+ mock.patch.object(azure_helper, "GoalState")
+ )
+ self.GoalState.return_value.container_id = self.default_parameters[
+ "container_id"
+ ]
+ self.GoalState.return_value.instance_id = self.default_parameters[
+ "instance_id"
+ ]
+ self.GoalState.return_value.incarnation = self.default_parameters[
+ "incarnation"
+ ]
def _text_from_xpath_in_xroot(self, xroot, xpath):
element = xroot.find(xpath)
@@ -680,34 +736,41 @@ class TestGoalStateHealthReporter(CiTestCase):
def _get_report_ready_health_document(self):
return self._get_formatted_health_report_xml_string(
- incarnation=escape(str(self.default_parameters['incarnation'])),
- container_id=escape(self.default_parameters['container_id']),
- instance_id=escape(self.default_parameters['instance_id']),
+ incarnation=escape(str(self.default_parameters["incarnation"])),
+ container_id=escape(self.default_parameters["container_id"]),
+ instance_id=escape(self.default_parameters["instance_id"]),
health_status=escape(self.provisioning_success_status),
- health_detail_subsection='')
+ health_detail_subsection="",
+ )
def _get_report_failure_health_document(self):
- health_detail_subsection = \
+ health_detail_subsection = (
self._get_formatted_health_detail_subsection_xml_string(
health_substatus=escape(self.provisioning_failure_substatus),
health_description=escape(
- self.provisioning_failure_err_description))
+ self.provisioning_failure_err_description
+ ),
+ )
+ )
return self._get_formatted_health_report_xml_string(
- incarnation=escape(str(self.default_parameters['incarnation'])),
- container_id=escape(self.default_parameters['container_id']),
- instance_id=escape(self.default_parameters['instance_id']),
+ incarnation=escape(str(self.default_parameters["incarnation"])),
+ container_id=escape(self.default_parameters["container_id"]),
+ instance_id=escape(self.default_parameters["instance_id"]),
health_status=escape(self.provisioning_not_ready_status),
- health_detail_subsection=health_detail_subsection)
+ health_detail_subsection=health_detail_subsection,
+ )
def test_send_ready_signal_sends_post_request(self):
with mock.patch.object(
- azure_helper.GoalStateHealthReporter,
- 'build_report') as m_build_report:
+ azure_helper.GoalStateHealthReporter, "build_report"
+ ) as m_build_report:
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
reporter = azure_helper.GoalStateHealthReporter(
azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
- client, self.test_azure_endpoint)
+ client,
+ self.test_azure_endpoint,
+ )
reporter.send_ready_signal()
self.assertEqual(1, self.post.call_count)
@@ -715,73 +778,94 @@ class TestGoalStateHealthReporter(CiTestCase):
mock.call(
self.test_health_report_url,
data=m_build_report.return_value,
- extra_headers=self.test_default_headers),
- self.post.call_args)
+ extra_headers=self.test_default_headers,
+ ),
+ self.post.call_args,
+ )
def test_send_failure_signal_sends_post_request(self):
with mock.patch.object(
- azure_helper.GoalStateHealthReporter,
- 'build_report') as m_build_report:
+ azure_helper.GoalStateHealthReporter, "build_report"
+ ) as m_build_report:
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
reporter = azure_helper.GoalStateHealthReporter(
azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
- client, self.test_azure_endpoint)
+ client,
+ self.test_azure_endpoint,
+ )
reporter.send_failure_signal(
- description=self.provisioning_failure_err_description)
+ description=self.provisioning_failure_err_description
+ )
self.assertEqual(1, self.post.call_count)
self.assertEqual(
mock.call(
self.test_health_report_url,
data=m_build_report.return_value,
- extra_headers=self.test_default_headers),
- self.post.call_args)
+ extra_headers=self.test_default_headers,
+ ),
+ self.post.call_args,
+ )
def test_build_report_for_ready_signal_health_document(self):
health_document = self._get_report_ready_health_document()
reporter = azure_helper.GoalStateHealthReporter(
azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
- self.test_azure_endpoint)
+ self.test_azure_endpoint,
+ )
generated_health_document = reporter.build_report(
- incarnation=self.default_parameters['incarnation'],
- container_id=self.default_parameters['container_id'],
- instance_id=self.default_parameters['instance_id'],
- status=self.provisioning_success_status)
+ incarnation=self.default_parameters["incarnation"],
+ container_id=self.default_parameters["container_id"],
+ instance_id=self.default_parameters["instance_id"],
+ status=self.provisioning_success_status,
+ )
self.assertEqual(health_document, generated_health_document)
generated_xroot = ElementTree.fromstring(generated_health_document)
self.assertEqual(
self._text_from_xpath_in_xroot(
- generated_xroot, './GoalStateIncarnation'),
- str(self.default_parameters['incarnation']))
+ generated_xroot, "./GoalStateIncarnation"
+ ),
+ str(self.default_parameters["incarnation"]),
+ )
self.assertEqual(
self._text_from_xpath_in_xroot(
- generated_xroot, './Container/ContainerId'),
- str(self.default_parameters['container_id']))
+ generated_xroot, "./Container/ContainerId"
+ ),
+ str(self.default_parameters["container_id"]),
+ )
self.assertEqual(
self._text_from_xpath_in_xroot(
- generated_xroot,
- './Container/RoleInstanceList/Role/InstanceId'),
- str(self.default_parameters['instance_id']))
+ generated_xroot, "./Container/RoleInstanceList/Role/InstanceId"
+ ),
+ str(self.default_parameters["instance_id"]),
+ )
self.assertEqual(
self._text_from_xpath_in_xroot(
generated_xroot,
- './Container/RoleInstanceList/Role/Health/State'),
- escape(self.provisioning_success_status))
+ "./Container/RoleInstanceList/Role/Health/State",
+ ),
+ escape(self.provisioning_success_status),
+ )
self.assertIsNone(
self._text_from_xpath_in_xroot(
generated_xroot,
- './Container/RoleInstanceList/Role/Health/Details'))
+ "./Container/RoleInstanceList/Role/Health/Details",
+ )
+ )
self.assertIsNone(
self._text_from_xpath_in_xroot(
generated_xroot,
- './Container/RoleInstanceList/Role/Health/Details/SubStatus'))
+ "./Container/RoleInstanceList/Role/Health/Details/SubStatus",
+ )
+ )
self.assertIsNone(
self._text_from_xpath_in_xroot(
generated_xroot,
- './Container/RoleInstanceList/Role/Health/Details/Description')
+ "./Container/RoleInstanceList/Role/Health/Details/Description",
+ )
)
def test_build_report_for_failure_signal_health_document(self):
@@ -789,120 +873,143 @@ class TestGoalStateHealthReporter(CiTestCase):
reporter = azure_helper.GoalStateHealthReporter(
azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
- self.test_azure_endpoint)
+ self.test_azure_endpoint,
+ )
generated_health_document = reporter.build_report(
- incarnation=self.default_parameters['incarnation'],
- container_id=self.default_parameters['container_id'],
- instance_id=self.default_parameters['instance_id'],
+ incarnation=self.default_parameters["incarnation"],
+ container_id=self.default_parameters["container_id"],
+ instance_id=self.default_parameters["instance_id"],
status=self.provisioning_not_ready_status,
substatus=self.provisioning_failure_substatus,
- description=self.provisioning_failure_err_description)
+ description=self.provisioning_failure_err_description,
+ )
self.assertEqual(health_document, generated_health_document)
generated_xroot = ElementTree.fromstring(generated_health_document)
self.assertEqual(
self._text_from_xpath_in_xroot(
- generated_xroot, './GoalStateIncarnation'),
- str(self.default_parameters['incarnation']))
+ generated_xroot, "./GoalStateIncarnation"
+ ),
+ str(self.default_parameters["incarnation"]),
+ )
self.assertEqual(
self._text_from_xpath_in_xroot(
- generated_xroot, './Container/ContainerId'),
- self.default_parameters['container_id'])
+ generated_xroot, "./Container/ContainerId"
+ ),
+ self.default_parameters["container_id"],
+ )
self.assertEqual(
self._text_from_xpath_in_xroot(
- generated_xroot,
- './Container/RoleInstanceList/Role/InstanceId'),
- self.default_parameters['instance_id'])
+ generated_xroot, "./Container/RoleInstanceList/Role/InstanceId"
+ ),
+ self.default_parameters["instance_id"],
+ )
self.assertEqual(
self._text_from_xpath_in_xroot(
generated_xroot,
- './Container/RoleInstanceList/Role/Health/State'),
- escape(self.provisioning_not_ready_status))
+ "./Container/RoleInstanceList/Role/Health/State",
+ ),
+ escape(self.provisioning_not_ready_status),
+ )
self.assertEqual(
self._text_from_xpath_in_xroot(
generated_xroot,
- './Container/RoleInstanceList/Role/Health/Details/'
- 'SubStatus'),
- escape(self.provisioning_failure_substatus))
+ "./Container/RoleInstanceList/Role/Health/Details/SubStatus",
+ ),
+ escape(self.provisioning_failure_substatus),
+ )
self.assertEqual(
self._text_from_xpath_in_xroot(
generated_xroot,
- './Container/RoleInstanceList/Role/Health/Details/'
- 'Description'),
- escape(self.provisioning_failure_err_description))
+ "./Container/RoleInstanceList/Role/Health/Details/Description",
+ ),
+ escape(self.provisioning_failure_err_description),
+ )
def test_send_ready_signal_calls_build_report(self):
with mock.patch.object(
- azure_helper.GoalStateHealthReporter, 'build_report'
+ azure_helper.GoalStateHealthReporter, "build_report"
) as m_build_report:
reporter = azure_helper.GoalStateHealthReporter(
azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
- self.test_azure_endpoint)
+ self.test_azure_endpoint,
+ )
reporter.send_ready_signal()
self.assertEqual(1, m_build_report.call_count)
self.assertEqual(
mock.call(
- incarnation=self.default_parameters['incarnation'],
- container_id=self.default_parameters['container_id'],
- instance_id=self.default_parameters['instance_id'],
- status=self.provisioning_success_status),
- m_build_report.call_args)
+ incarnation=self.default_parameters["incarnation"],
+ container_id=self.default_parameters["container_id"],
+ instance_id=self.default_parameters["instance_id"],
+ status=self.provisioning_success_status,
+ ),
+ m_build_report.call_args,
+ )
def test_send_failure_signal_calls_build_report(self):
with mock.patch.object(
- azure_helper.GoalStateHealthReporter, 'build_report'
+ azure_helper.GoalStateHealthReporter, "build_report"
) as m_build_report:
reporter = azure_helper.GoalStateHealthReporter(
azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
- self.test_azure_endpoint)
+ self.test_azure_endpoint,
+ )
reporter.send_failure_signal(
- description=self.provisioning_failure_err_description)
+ description=self.provisioning_failure_err_description
+ )
self.assertEqual(1, m_build_report.call_count)
self.assertEqual(
mock.call(
- incarnation=self.default_parameters['incarnation'],
- container_id=self.default_parameters['container_id'],
- instance_id=self.default_parameters['instance_id'],
+ incarnation=self.default_parameters["incarnation"],
+ container_id=self.default_parameters["container_id"],
+ instance_id=self.default_parameters["instance_id"],
status=self.provisioning_not_ready_status,
substatus=self.provisioning_failure_substatus,
- description=self.provisioning_failure_err_description),
- m_build_report.call_args)
+ description=self.provisioning_failure_err_description,
+ ),
+ m_build_report.call_args,
+ )
def test_build_report_escapes_chars(self):
- incarnation = 'jd8\'9*&^<\'A><A[p&o+\"SD()*&&&LKAJSD23'
- container_id = '&&<\"><><ds8\'9+7&d9a86!@($09asdl;<>'
- instance_id = 'Opo>>>jas\'&d;[p&fp\"a<<!!@&&'
- health_status = '&<897\"6&>&aa\'sd!@&!)((*<&>'
- health_substatus = '&as\"d<<a&s>d<\'^@!5&6<7'
- health_description = '&&&>!#$\"&&<as\'1!@$d&>><>&\"sd<67<]>>'
-
- health_detail_subsection = \
+ incarnation = "jd8'9*&^<'A><A[p&o+\"SD()*&&&LKAJSD23"
+ container_id = "&&<\"><><ds8'9+7&d9a86!@($09asdl;<>"
+ instance_id = "Opo>>>jas'&d;[p&fp\"a<<!!@&&"
+ health_status = "&<897\"6&>&aa'sd!@&!)((*<&>"
+ health_substatus = "&as\"d<<a&s>d<'^@!5&6<7"
+ health_description = '&&&>!#$"&&<as\'1!@$d&>><>&"sd<67<]>>'
+
+ health_detail_subsection = (
self._get_formatted_health_detail_subsection_xml_string(
health_substatus=escape(health_substatus),
- health_description=escape(health_description))
+ health_description=escape(health_description),
+ )
+ )
health_document = self._get_formatted_health_report_xml_string(
incarnation=escape(incarnation),
container_id=escape(container_id),
instance_id=escape(instance_id),
health_status=escape(health_status),
- health_detail_subsection=health_detail_subsection)
+ health_detail_subsection=health_detail_subsection,
+ )
reporter = azure_helper.GoalStateHealthReporter(
azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
- self.test_azure_endpoint)
+ self.test_azure_endpoint,
+ )
generated_health_document = reporter.build_report(
incarnation=incarnation,
container_id=container_id,
instance_id=instance_id,
status=health_status,
substatus=health_substatus,
- description=health_description)
+ description=health_description,
+ )
self.assertEqual(health_document, generated_health_document)
@@ -910,26 +1017,31 @@ class TestGoalStateHealthReporter(CiTestCase):
reporter = azure_helper.GoalStateHealthReporter(
azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
- self.test_azure_endpoint)
- long_err_msg = 'a9&ea8>>>e as1< d\"q2*&(^%\'a=5<' * 100
+ self.test_azure_endpoint,
+ )
+ long_err_msg = "a9&ea8>>>e as1< d\"q2*&(^%'a=5<" * 100
generated_health_document = reporter.build_report(
- incarnation=self.default_parameters['incarnation'],
- container_id=self.default_parameters['container_id'],
- instance_id=self.default_parameters['instance_id'],
+ incarnation=self.default_parameters["incarnation"],
+ container_id=self.default_parameters["container_id"],
+ instance_id=self.default_parameters["instance_id"],
status=self.provisioning_not_ready_status,
substatus=self.provisioning_failure_substatus,
- description=long_err_msg)
+ description=long_err_msg,
+ )
generated_xroot = ElementTree.fromstring(generated_health_document)
generated_health_report_description = self._text_from_xpath_in_xroot(
generated_xroot,
- './Container/RoleInstanceList/Role/Health/Details/Description')
+ "./Container/RoleInstanceList/Role/Health/Details/Description",
+ )
self.assertEqual(
len(unescape(generated_health_report_description)),
- HEALTH_REPORT_DESCRIPTION_TRIM_LEN)
+ HEALTH_REPORT_DESCRIPTION_TRIM_LEN,
+ )
def test_trim_description_then_escape_conforms_to_len_limits_worst_case(
- self):
+ self,
+ ):
"""When unescaped characters are XML-escaped, the length increases.
Char Escape String
< &lt;
@@ -958,150 +1070,176 @@ class TestGoalStateHealthReporter(CiTestCase):
reporter = azure_helper.GoalStateHealthReporter(
azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
- self.test_azure_endpoint)
- long_err_msg = '\'\"' * 10000
+ self.test_azure_endpoint,
+ )
+ long_err_msg = "'\"" * 10000
generated_health_document = reporter.build_report(
- incarnation=self.default_parameters['incarnation'],
- container_id=self.default_parameters['container_id'],
- instance_id=self.default_parameters['instance_id'],
+ incarnation=self.default_parameters["incarnation"],
+ container_id=self.default_parameters["container_id"],
+ instance_id=self.default_parameters["instance_id"],
status=self.provisioning_not_ready_status,
substatus=self.provisioning_failure_substatus,
- description=long_err_msg)
+ description=long_err_msg,
+ )
generated_xroot = ElementTree.fromstring(generated_health_document)
generated_health_report_description = self._text_from_xpath_in_xroot(
generated_xroot,
- './Container/RoleInstanceList/Role/Health/Details/Description')
+ "./Container/RoleInstanceList/Role/Health/Details/Description",
+ )
# The escaped description string should be less than
# the Azure platform limit for the escaped description string.
self.assertLessEqual(len(generated_health_report_description), 4096)

class TestWALinuxAgentShim(CiTestCase):
-
def setUp(self):
super(TestWALinuxAgentShim, self).setUp()
patches = ExitStack()
self.addCleanup(patches.close)
self.AzureEndpointHttpClient = patches.enter_context(
- mock.patch.object(azure_helper, 'AzureEndpointHttpClient'))
+ mock.patch.object(azure_helper, "AzureEndpointHttpClient")
+ )
self.find_endpoint = patches.enter_context(
- mock.patch.object(wa_shim, 'find_endpoint'))
+ mock.patch.object(wa_shim, "find_endpoint")
+ )
self.GoalState = patches.enter_context(
- mock.patch.object(azure_helper, 'GoalState'))
+ mock.patch.object(azure_helper, "GoalState")
+ )
self.OpenSSLManager = patches.enter_context(
- mock.patch.object(azure_helper, 'OpenSSLManager', autospec=True))
+ mock.patch.object(azure_helper, "OpenSSLManager", autospec=True)
+ )
patches.enter_context(
- mock.patch.object(azure_helper.time, 'sleep', mock.MagicMock()))
+ mock.patch.object(azure_helper.time, "sleep", mock.MagicMock())
+ )
- self.test_incarnation = 'TestIncarnation'
- self.test_container_id = 'TestContainerId'
- self.test_instance_id = 'TestInstanceId'
+ self.test_incarnation = "TestIncarnation"
+ self.test_container_id = "TestContainerId"
+ self.test_instance_id = "TestInstanceId"
self.GoalState.return_value.incarnation = self.test_incarnation
self.GoalState.return_value.container_id = self.test_container_id
self.GoalState.return_value.instance_id = self.test_instance_id
+ def test_eject_iso_is_called(self):
+ shim = wa_shim()
+ with mock.patch.object(
+ shim, "eject_iso", autospec=True
+ ) as m_eject_iso:
+ shim.register_with_azure_and_fetch_data(iso_dev="/dev/sr0")
+ m_eject_iso.assert_called_once_with("/dev/sr0")
+
def test_http_client_does_not_use_certificate_for_report_ready(self):
shim = wa_shim()
shim.register_with_azure_and_fetch_data()
self.assertEqual(
- [mock.call(None)],
- self.AzureEndpointHttpClient.call_args_list)
+ [mock.call(None)], self.AzureEndpointHttpClient.call_args_list
+ )
def test_http_client_does_not_use_certificate_for_report_failure(self):
shim = wa_shim()
- shim.register_with_azure_and_report_failure(description='TestDesc')
+ shim.register_with_azure_and_report_failure(description="TestDesc")
self.assertEqual(
- [mock.call(None)],
- self.AzureEndpointHttpClient.call_args_list)
+ [mock.call(None)], self.AzureEndpointHttpClient.call_args_list
+ )
def test_correct_url_used_for_goalstate_during_report_ready(self):
- self.find_endpoint.return_value = 'test_endpoint'
+ self.find_endpoint.return_value = "test_endpoint"
shim = wa_shim()
shim.register_with_azure_and_fetch_data()
m_get = self.AzureEndpointHttpClient.return_value.get
self.assertEqual(
- [mock.call('http://test_endpoint/machine/?comp=goalstate')],
- m_get.call_args_list)
+ [mock.call("http://test_endpoint/machine/?comp=goalstate")],
+ m_get.call_args_list,
+ )
self.assertEqual(
- [mock.call(
- m_get.return_value.contents,
- self.AzureEndpointHttpClient.return_value,
- False
- )],
- self.GoalState.call_args_list)
+ [
+ mock.call(
+ m_get.return_value.contents,
+ self.AzureEndpointHttpClient.return_value,
+ False,
+ )
+ ],
+ self.GoalState.call_args_list,
+ )
def test_correct_url_used_for_goalstate_during_report_failure(self):
- self.find_endpoint.return_value = 'test_endpoint'
+ self.find_endpoint.return_value = "test_endpoint"
shim = wa_shim()
- shim.register_with_azure_and_report_failure(description='TestDesc')
+ shim.register_with_azure_and_report_failure(description="TestDesc")
m_get = self.AzureEndpointHttpClient.return_value.get
self.assertEqual(
- [mock.call('http://test_endpoint/machine/?comp=goalstate')],
- m_get.call_args_list)
+ [mock.call("http://test_endpoint/machine/?comp=goalstate")],
+ m_get.call_args_list,
+ )
self.assertEqual(
- [mock.call(
- m_get.return_value.contents,
- self.AzureEndpointHttpClient.return_value,
- False
- )],
- self.GoalState.call_args_list)
+ [
+ mock.call(
+ m_get.return_value.contents,
+ self.AzureEndpointHttpClient.return_value,
+ False,
+ )
+ ],
+ self.GoalState.call_args_list,
+ )
def test_certificates_used_to_determine_public_keys(self):
# if register_with_azure_and_fetch_data() isn't passed some info about
# the user's public keys, there's no point in even trying to parse the
# certificates
shim = wa_shim()
- mypk = [{'fingerprint': 'fp1', 'path': 'path1'},
- {'fingerprint': 'fp3', 'path': 'path3', 'value': ''}]
- certs = {'fp1': 'expected-key',
- 'fp2': 'should-not-be-found',
- 'fp3': 'expected-no-value-key',
- }
+ mypk = [
+ {"fingerprint": "fp1", "path": "path1"},
+ {"fingerprint": "fp3", "path": "path3", "value": ""},
+ ]
+ certs = {
+ "fp1": "expected-key",
+ "fp2": "should-not-be-found",
+ "fp3": "expected-no-value-key",
+ }
sslmgr = self.OpenSSLManager.return_value
sslmgr.parse_certificates.return_value = certs
data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk)
self.assertEqual(
[mock.call(self.GoalState.return_value.certificates_xml)],
- sslmgr.parse_certificates.call_args_list)
- self.assertIn('expected-key', data['public-keys'])
- self.assertIn('expected-no-value-key', data['public-keys'])
- self.assertNotIn('should-not-be-found', data['public-keys'])
+ sslmgr.parse_certificates.call_args_list,
+ )
+ self.assertIn("expected-key", data)
+ self.assertIn("expected-no-value-key", data)
+ self.assertNotIn("should-not-be-found", data)
def test_absent_certificates_produces_empty_public_keys(self):
- mypk = [{'fingerprint': 'fp1', 'path': 'path1'}]
+ mypk = [{"fingerprint": "fp1", "path": "path1"}]
self.GoalState.return_value.certificates_xml = None
shim = wa_shim()
data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk)
- self.assertEqual([], data['public-keys'])
+ self.assertEqual([], data)
def test_correct_url_used_for_report_ready(self):
- self.find_endpoint.return_value = 'test_endpoint'
+ self.find_endpoint.return_value = "test_endpoint"
shim = wa_shim()
shim.register_with_azure_and_fetch_data()
- expected_url = 'http://test_endpoint/machine?comp=health'
+ expected_url = "http://test_endpoint/machine?comp=health"
self.assertEqual(
[mock.call(expected_url, data=mock.ANY, extra_headers=mock.ANY)],
- self.AzureEndpointHttpClient.return_value.post
- .call_args_list)
+ self.AzureEndpointHttpClient.return_value.post.call_args_list,
+ )
def test_correct_url_used_for_report_failure(self):
- self.find_endpoint.return_value = 'test_endpoint'
+ self.find_endpoint.return_value = "test_endpoint"
shim = wa_shim()
- shim.register_with_azure_and_report_failure(description='TestDesc')
- expected_url = 'http://test_endpoint/machine?comp=health'
+ shim.register_with_azure_and_report_failure(description="TestDesc")
+ expected_url = "http://test_endpoint/machine?comp=health"
self.assertEqual(
[mock.call(expected_url, data=mock.ANY, extra_headers=mock.ANY)],
- self.AzureEndpointHttpClient.return_value.post
- .call_args_list)
+ self.AzureEndpointHttpClient.return_value.post.call_args_list,
+ )
def test_goal_state_values_used_for_report_ready(self):
shim = wa_shim()
shim.register_with_azure_and_fetch_data()
posted_document = (
- self.AzureEndpointHttpClient.return_value.post
- .call_args[1]['data']
+ self.AzureEndpointHttpClient.return_value.post.call_args[1]["data"]
)
self.assertIn(self.test_incarnation, posted_document)
self.assertIn(self.test_container_id, posted_document)
@@ -1109,10 +1247,9 @@ class TestWALinuxAgentShim(CiTestCase):
def test_goal_state_values_used_for_report_failure(self):
shim = wa_shim()
- shim.register_with_azure_and_report_failure(description='TestDesc')
+ shim.register_with_azure_and_report_failure(description="TestDesc")
posted_document = (
- self.AzureEndpointHttpClient.return_value.post
- .call_args[1]['data']
+ self.AzureEndpointHttpClient.return_value.post.call_args[1]["data"]
)
self.assertIn(self.test_incarnation, posted_document)
self.assertIn(self.test_container_id, posted_document)
@@ -1125,57 +1262,66 @@ class TestWALinuxAgentShim(CiTestCase):
incarnation=escape(self.test_incarnation),
container_id=escape(self.test_container_id),
instance_id=escape(self.test_instance_id),
- health_status=escape('Ready'),
- health_detail_subsection='')
+ health_status=escape("Ready"),
+ health_detail_subsection="",
+ )
posted_document = (
- self.AzureEndpointHttpClient.return_value.post
- .call_args[1]['data'])
+ self.AzureEndpointHttpClient.return_value.post.call_args[1]["data"]
+ )
self.assertEqual(health_document, posted_document)
def test_xml_elems_in_report_failure_post(self):
shim = wa_shim()
- shim.register_with_azure_and_report_failure(description='TestDesc')
+ shim.register_with_azure_and_report_failure(description="TestDesc")
health_document = HEALTH_REPORT_XML_TEMPLATE.format(
incarnation=escape(self.test_incarnation),
container_id=escape(self.test_container_id),
instance_id=escape(self.test_instance_id),
- health_status=escape('NotReady'),
- health_detail_subsection=HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE
- .format(
- health_substatus=escape('ProvisioningFailed'),
- health_description=escape('TestDesc')))
+ health_status=escape("NotReady"),
+ health_detail_subsection=(
+ HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE.format(
+ health_substatus=escape("ProvisioningFailed"),
+ health_description=escape("TestDesc"),
+ )
+ ),
+ )
posted_document = (
- self.AzureEndpointHttpClient.return_value.post
- .call_args[1]['data'])
+ self.AzureEndpointHttpClient.return_value.post.call_args[1]["data"]
+ )
self.assertEqual(health_document, posted_document)
- @mock.patch.object(azure_helper, 'GoalStateHealthReporter', autospec=True)
+ @mock.patch.object(azure_helper, "GoalStateHealthReporter", autospec=True)
def test_register_with_azure_and_fetch_data_calls_send_ready_signal(
- self, m_goal_state_health_reporter):
+ self, m_goal_state_health_reporter
+ ):
shim = wa_shim()
shim.register_with_azure_and_fetch_data()
self.assertEqual(
1,
- m_goal_state_health_reporter.return_value.send_ready_signal
- .call_count)
+ m_goal_state_health_reporter.return_value.send_ready_signal.call_count, # noqa: E501
+ )
- @mock.patch.object(azure_helper, 'GoalStateHealthReporter', autospec=True)
+ @mock.patch.object(azure_helper, "GoalStateHealthReporter", autospec=True)
def test_register_with_azure_and_report_failure_calls_send_failure_signal(
- self, m_goal_state_health_reporter):
+ self, m_goal_state_health_reporter
+ ):
shim = wa_shim()
- shim.register_with_azure_and_report_failure(description='TestDesc')
- m_goal_state_health_reporter.return_value.send_failure_signal \
- .assert_called_once_with(description='TestDesc')
+ shim.register_with_azure_and_report_failure(description="TestDesc")
+ m_goal_state_health_reporter.return_value.send_failure_signal.assert_called_once_with( # noqa: E501
+ description="TestDesc"
+ )
def test_register_with_azure_and_report_failure_does_not_need_certificates(
- self):
+ self,
+ ):
shim = wa_shim()
with mock.patch.object(
- shim, '_fetch_goal_state_from_azure', autospec=True
+ shim, "_fetch_goal_state_from_azure", autospec=True
) as m_fetch_goal_state_from_azure:
- shim.register_with_azure_and_report_failure(description='TestDesc')
+ shim.register_with_azure_and_report_failure(description="TestDesc")
m_fetch_goal_state_from_azure.assert_called_once_with(
- need_certificate=False)
+ need_certificate=False
+ )
def test_clean_up_can_be_called_at_any_time(self):
shim = wa_shim()
@@ -1184,7 +1330,7 @@ class TestWALinuxAgentShim(CiTestCase):
def test_openssl_manager_not_instantiated_by_shim_report_status(self):
shim = wa_shim()
shim.register_with_azure_and_fetch_data()
- shim.register_with_azure_and_report_failure(description='TestDesc')
+ shim.register_with_azure_and_report_failure(description="TestDesc")
shim.clean_up()
self.OpenSSLManager.assert_not_called()
@@ -1196,177 +1342,204 @@ class TestWALinuxAgentShim(CiTestCase):
def test_clean_up_after_report_failure(self):
shim = wa_shim()
- shim.register_with_azure_and_report_failure(description='TestDesc')
+ shim.register_with_azure_and_report_failure(description="TestDesc")
shim.clean_up()
self.OpenSSLManager.return_value.clean_up.assert_not_called()
def test_fetch_goalstate_during_report_ready_raises_exc_on_get_exc(self):
- self.AzureEndpointHttpClient.return_value.get \
- .side_effect = SentinelException
+ self.AzureEndpointHttpClient.return_value.get.side_effect = (
+ SentinelException
+ )
shim = wa_shim()
- self.assertRaises(SentinelException,
- shim.register_with_azure_and_fetch_data)
+ self.assertRaises(
+ SentinelException, shim.register_with_azure_and_fetch_data
+ )
def test_fetch_goalstate_during_report_failure_raises_exc_on_get_exc(self):
- self.AzureEndpointHttpClient.return_value.get \
- .side_effect = SentinelException
+ self.AzureEndpointHttpClient.return_value.get.side_effect = (
+ SentinelException
+ )
shim = wa_shim()
- self.assertRaises(SentinelException,
- shim.register_with_azure_and_report_failure,
- description='TestDesc')
+ self.assertRaises(
+ SentinelException,
+ shim.register_with_azure_and_report_failure,
+ description="TestDesc",
+ )
def test_fetch_goalstate_during_report_ready_raises_exc_on_parse_exc(self):
self.GoalState.side_effect = SentinelException
shim = wa_shim()
- self.assertRaises(SentinelException,
- shim.register_with_azure_and_fetch_data)
+ self.assertRaises(
+ SentinelException, shim.register_with_azure_and_fetch_data
+ )
def test_fetch_goalstate_during_report_failure_raises_exc_on_parse_exc(
- self):
+ self,
+ ):
self.GoalState.side_effect = SentinelException
shim = wa_shim()
- self.assertRaises(SentinelException,
- shim.register_with_azure_and_report_failure,
- description='TestDesc')
+ self.assertRaises(
+ SentinelException,
+ shim.register_with_azure_and_report_failure,
+ description="TestDesc",
+ )
def test_failure_to_send_report_ready_health_doc_bubbles_up(self):
- self.AzureEndpointHttpClient.return_value.post \
- .side_effect = SentinelException
+ self.AzureEndpointHttpClient.return_value.post.side_effect = (
+ SentinelException
+ )
shim = wa_shim()
- self.assertRaises(SentinelException,
- shim.register_with_azure_and_fetch_data)
+ self.assertRaises(
+ SentinelException, shim.register_with_azure_and_fetch_data
+ )
def test_failure_to_send_report_failure_health_doc_bubbles_up(self):
- self.AzureEndpointHttpClient.return_value.post \
- .side_effect = SentinelException
+ self.AzureEndpointHttpClient.return_value.post.side_effect = (
+ SentinelException
+ )
shim = wa_shim()
- self.assertRaises(SentinelException,
- shim.register_with_azure_and_report_failure,
- description='TestDesc')
+ self.assertRaises(
+ SentinelException,
+ shim.register_with_azure_and_report_failure,
+ description="TestDesc",
+ )

class TestGetMetadataGoalStateXMLAndReportReadyToFabric(CiTestCase):
-
def setUp(self):
super(TestGetMetadataGoalStateXMLAndReportReadyToFabric, self).setUp()
patches = ExitStack()
self.addCleanup(patches.close)
self.m_shim = patches.enter_context(
- mock.patch.object(azure_helper, 'WALinuxAgentShim'))
+ mock.patch.object(azure_helper, "WALinuxAgentShim")
+ )
def test_data_from_shim_returned(self):
ret = azure_helper.get_metadata_from_fabric()
self.assertEqual(
- self.m_shim.return_value.register_with_azure_and_fetch_data
- .return_value,
- ret)
+ self.m_shim.return_value.register_with_azure_and_fetch_data.return_value, # noqa: E501
+ ret,
+ )
def test_success_calls_clean_up(self):
azure_helper.get_metadata_from_fabric()
self.assertEqual(1, self.m_shim.return_value.clean_up.call_count)
- def test_failure_in_registration_propagates_exc_and_calls_clean_up(
- self):
- self.m_shim.return_value.register_with_azure_and_fetch_data \
- .side_effect = SentinelException
- self.assertRaises(SentinelException,
- azure_helper.get_metadata_from_fabric)
+ def test_failure_in_registration_propagates_exc_and_calls_clean_up(self):
+ self.m_shim.return_value.register_with_azure_and_fetch_data.side_effect = ( # noqa: E501
+ SentinelException
+ )
+ self.assertRaises(
+ SentinelException, azure_helper.get_metadata_from_fabric
+ )
self.assertEqual(1, self.m_shim.return_value.clean_up.call_count)
def test_calls_shim_register_with_azure_and_fetch_data(self):
m_pubkey_info = mock.MagicMock()
- azure_helper.get_metadata_from_fabric(pubkey_info=m_pubkey_info)
+ azure_helper.get_metadata_from_fabric(
+ pubkey_info=m_pubkey_info, iso_dev="/dev/sr0"
+ )
self.assertEqual(
1,
- self.m_shim.return_value
- .register_with_azure_and_fetch_data.call_count)
+ self.m_shim.return_value.register_with_azure_and_fetch_data.call_count, # noqa: E501
+ )
self.assertEqual(
- mock.call(pubkey_info=m_pubkey_info),
- self.m_shim.return_value
- .register_with_azure_and_fetch_data.call_args)
+ mock.call(iso_dev="/dev/sr0", pubkey_info=m_pubkey_info),
+ self.m_shim.return_value.register_with_azure_and_fetch_data.call_args, # noqa: E501
+ )
def test_instantiates_shim_with_kwargs(self):
m_fallback_lease_file = mock.MagicMock()
m_dhcp_options = mock.MagicMock()
azure_helper.get_metadata_from_fabric(
- fallback_lease_file=m_fallback_lease_file,
- dhcp_opts=m_dhcp_options)
+ fallback_lease_file=m_fallback_lease_file, dhcp_opts=m_dhcp_options
+ )
self.assertEqual(1, self.m_shim.call_count)
self.assertEqual(
mock.call(
fallback_lease_file=m_fallback_lease_file,
- dhcp_options=m_dhcp_options),
- self.m_shim.call_args)
+ dhcp_options=m_dhcp_options,
+ ),
+ self.m_shim.call_args,
+ )

class TestGetMetadataGoalStateXMLAndReportFailureToFabric(CiTestCase):
-
def setUp(self):
super(
- TestGetMetadataGoalStateXMLAndReportFailureToFabric, self).setUp()
+ TestGetMetadataGoalStateXMLAndReportFailureToFabric, self
+ ).setUp()
patches = ExitStack()
self.addCleanup(patches.close)
self.m_shim = patches.enter_context(
- mock.patch.object(azure_helper, 'WALinuxAgentShim'))
+ mock.patch.object(azure_helper, "WALinuxAgentShim")
+ )
def test_success_calls_clean_up(self):
azure_helper.report_failure_to_fabric()
- self.assertEqual(
- 1,
- self.m_shim.return_value.clean_up.call_count)
+ self.assertEqual(1, self.m_shim.return_value.clean_up.call_count)
def test_failure_in_shim_report_failure_propagates_exc_and_calls_clean_up(
- self):
- self.m_shim.return_value.register_with_azure_and_report_failure \
- .side_effect = SentinelException
- self.assertRaises(SentinelException,
- azure_helper.report_failure_to_fabric)
- self.assertEqual(
- 1,
- self.m_shim.return_value.clean_up.call_count)
+ self,
+ ):
+ self.m_shim.return_value.register_with_azure_and_report_failure.side_effect = ( # noqa: E501
+ SentinelException
+ )
+ self.assertRaises(
+ SentinelException, azure_helper.report_failure_to_fabric
+ )
+ self.assertEqual(1, self.m_shim.return_value.clean_up.call_count)
def test_report_failure_to_fabric_with_desc_calls_shim_report_failure(
- self):
- azure_helper.report_failure_to_fabric(description='TestDesc')
- self.m_shim.return_value.register_with_azure_and_report_failure \
- .assert_called_once_with(description='TestDesc')
+ self,
+ ):
+ azure_helper.report_failure_to_fabric(description="TestDesc")
+ self.m_shim.return_value.register_with_azure_and_report_failure.assert_called_once_with( # noqa: E501
+ description="TestDesc"
+ )
def test_report_failure_to_fabric_with_no_desc_calls_shim_report_failure(
- self):
+ self,
+ ):
azure_helper.report_failure_to_fabric()
# default err message description should be shown to the user
# if no description is passed in
- self.m_shim.return_value.register_with_azure_and_report_failure \
- .assert_called_once_with(
- description=azure_helper
- .DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE)
+ self.m_shim.return_value.register_with_azure_and_report_failure.assert_called_once_with( # noqa: E501
+ description=(
+ azure_helper.DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE
+ )
+ )
def test_report_failure_to_fabric_empty_desc_calls_shim_report_failure(
- self):
- azure_helper.report_failure_to_fabric(description='')
+ self,
+ ):
+ azure_helper.report_failure_to_fabric(description="")
# default err message description should be shown to the user
# if an empty description is passed in
- self.m_shim.return_value.register_with_azure_and_report_failure \
- .assert_called_once_with(
- description=azure_helper
- .DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE)
+ self.m_shim.return_value.register_with_azure_and_report_failure.assert_called_once_with( # noqa: E501
+ description=(
+ azure_helper.DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE
+ )
+ )
def test_instantiates_shim_with_kwargs(self):
m_fallback_lease_file = mock.MagicMock()
m_dhcp_options = mock.MagicMock()
azure_helper.report_failure_to_fabric(
- fallback_lease_file=m_fallback_lease_file,
- dhcp_opts=m_dhcp_options)
+ fallback_lease_file=m_fallback_lease_file, dhcp_opts=m_dhcp_options
+ )
self.m_shim.assert_called_once_with(
fallback_lease_file=m_fallback_lease_file,
- dhcp_options=m_dhcp_options)
+ dhcp_options=m_dhcp_options,
+ )

class TestExtractIpAddressFromNetworkd(CiTestCase):
- azure_lease = dedent("""\
+ azure_lease = dedent(
+ """\
# This is private data. Do not parse.
ADDRESS=10.132.0.5
NETMASK=255.255.255.255
@@ -1385,7 +1558,8 @@ class TestExtractIpAddressFromNetworkd(CiTestCase):
ROUTES=10.132.0.1/32,0.0.0.0 0.0.0.0/0,10.132.0.1
CLIENTID=ff405663a200020000ab11332859494d7a8b4c
OPTION_245=624c3620
- """)
+ """
+ )
def setUp(self):
super(TestExtractIpAddressFromNetworkd, self).setUp()
@@ -1394,21 +1568,25 @@ class TestExtractIpAddressFromNetworkd(CiTestCase):
def test_no_valid_leases_is_none(self):
"""No valid leases should return None."""
self.assertIsNone(
- wa_shim._networkd_get_value_from_leases(self.lease_d))
+ wa_shim._networkd_get_value_from_leases(self.lease_d)
+ )
def test_option_245_is_found_in_single(self):
"""A single valid lease with 245 option should return it."""
- populate_dir(self.lease_d, {'9': self.azure_lease})
+ populate_dir(self.lease_d, {"9": self.azure_lease})
self.assertEqual(
- '624c3620', wa_shim._networkd_get_value_from_leases(self.lease_d))
+ "624c3620", wa_shim._networkd_get_value_from_leases(self.lease_d)
+ )
def test_option_245_not_found_returns_None(self):
"""A valid lease, but no option 245 should return None."""
populate_dir(
self.lease_d,
- {'9': self.azure_lease.replace("OPTION_245", "OPTION_999")})
+ {"9": self.azure_lease.replace("OPTION_245", "OPTION_999")},
+ )
self.assertIsNone(
- wa_shim._networkd_get_value_from_leases(self.lease_d))
+ wa_shim._networkd_get_value_from_leases(self.lease_d)
+ )
def test_multiple_returns_first(self):
"""Somewhat arbitrarily return the first address when multiple.
@@ -1418,10 +1596,14 @@ class TestExtractIpAddressFromNetworkd(CiTestCase):
myval = "624c3601"
populate_dir(
self.lease_d,
- {'9': self.azure_lease,
- '2': self.azure_lease.replace("624c3620", myval)})
+ {
+ "9": self.azure_lease,
+ "2": self.azure_lease.replace("624c3620", myval),
+ },
+ )
self.assertEqual(
- myval, wa_shim._networkd_get_value_from_leases(self.lease_d))
+ myval, wa_shim._networkd_get_value_from_leases(self.lease_d)
+ )
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_cloudsigma.py b/tests/unittests/sources/test_cloudsigma.py
index 7aa3b1d1..a2f26245 100644
--- a/tests/unittests/test_datasource/test_cloudsigma.py
+++ b/tests/unittests/sources/test_cloudsigma.py
@@ -2,13 +2,10 @@
import copy
+from cloudinit import distros, helpers, sources
from cloudinit.cs_utils import Cepko
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import sources
from cloudinit.sources import DataSourceCloudSigma
-
-from cloudinit.tests import helpers as test_helpers
+from tests.unittests import helpers as test_helpers
SERVER_CONTEXT = {
"cpu": 1000,
@@ -28,10 +25,10 @@ SERVER_CONTEXT = {
"vendor_data": {
"location": "zrh",
"cloudinit": "#cloud-config\n\n...",
- }
+ },
}
-DS_PATH = 'cloudinit.sources.DataSourceCloudSigma.DataSourceCloudSigma'
+DS_PATH = "cloudinit.sources.DataSourceCloudSigma.DataSourceCloudSigma"

class CepkoMock(Cepko):
@@ -45,41 +42,48 @@ class CepkoMock(Cepko):
class DataSourceCloudSigmaTest(test_helpers.CiTestCase):
def setUp(self):
super(DataSourceCloudSigmaTest, self).setUp()
- self.paths = helpers.Paths({'run_dir': self.tmp_dir()})
- self.add_patch(DS_PATH + '.is_running_in_cloudsigma',
- "m_is_container", return_value=True)
+ self.paths = helpers.Paths({"run_dir": self.tmp_dir()})
+ self.add_patch(
+ DS_PATH + ".is_running_in_cloudsigma",
+ "m_is_container",
+ return_value=True,
+ )
distro_cls = distros.fetch("ubuntu")
distro = distro_cls("ubuntu", cfg={}, paths=self.paths)
self.datasource = DataSourceCloudSigma.DataSourceCloudSigma(
- sys_cfg={}, distro=distro, paths=self.paths)
+ sys_cfg={}, distro=distro, paths=self.paths
+ )
self.datasource.cepko = CepkoMock(SERVER_CONTEXT)
def test_get_hostname(self):
self.datasource.get_data()
self.assertEqual("test_server", self.datasource.get_hostname())
- self.datasource.metadata['name'] = ''
+ self.datasource.metadata["name"] = ""
self.assertEqual("65b2fb23", self.datasource.get_hostname())
- utf8_hostname = b'\xd1\x82\xd0\xb5\xd1\x81\xd1\x82'.decode('utf-8')
- self.datasource.metadata['name'] = utf8_hostname
+ utf8_hostname = b"\xd1\x82\xd0\xb5\xd1\x81\xd1\x82".decode("utf-8")
+ self.datasource.metadata["name"] = utf8_hostname
self.assertEqual("65b2fb23", self.datasource.get_hostname())
def test_get_public_ssh_keys(self):
self.datasource.get_data()
- self.assertEqual([SERVER_CONTEXT['meta']['ssh_public_key']],
- self.datasource.get_public_ssh_keys())
+ self.assertEqual(
+ [SERVER_CONTEXT["meta"]["ssh_public_key"]],
+ self.datasource.get_public_ssh_keys(),
+ )
def test_get_instance_id(self):
self.datasource.get_data()
- self.assertEqual(SERVER_CONTEXT['uuid'],
- self.datasource.get_instance_id())
+ self.assertEqual(
+ SERVER_CONTEXT["uuid"], self.datasource.get_instance_id()
+ )
def test_platform(self):
"""All platform-related attributes are set."""
self.datasource.get_data()
- self.assertEqual(self.datasource.cloud_name, 'cloudsigma')
- self.assertEqual(self.datasource.platform_type, 'cloudsigma')
- self.assertEqual(self.datasource.subplatform, 'cepko (/dev/ttyS1)')
+ self.assertEqual(self.datasource.cloud_name, "cloudsigma")
+ self.assertEqual(self.datasource.platform_type, "cloudsigma")
+ self.assertEqual(self.datasource.subplatform, "cepko (/dev/ttyS1)")
def test_metadata(self):
self.datasource.get_data()
@@ -87,22 +91,26 @@ class DataSourceCloudSigmaTest(test_helpers.CiTestCase):
def test_user_data(self):
self.datasource.get_data()
- self.assertEqual(self.datasource.userdata_raw,
- SERVER_CONTEXT['meta']['cloudinit-user-data'])
+ self.assertEqual(
+ self.datasource.userdata_raw,
+ SERVER_CONTEXT["meta"]["cloudinit-user-data"],
+ )
def test_encoded_user_data(self):
encoded_context = copy.deepcopy(SERVER_CONTEXT)
- encoded_context['meta']['base64_fields'] = 'cloudinit-user-data'
- encoded_context['meta']['cloudinit-user-data'] = 'aGkgd29ybGQK'
+ encoded_context["meta"]["base64_fields"] = "cloudinit-user-data"
+ encoded_context["meta"]["cloudinit-user-data"] = "aGkgd29ybGQK"
self.datasource.cepko = CepkoMock(encoded_context)
self.datasource.get_data()
- self.assertEqual(self.datasource.userdata_raw, b'hi world\n')
+ self.assertEqual(self.datasource.userdata_raw, b"hi world\n")
def test_vendor_data(self):
self.datasource.get_data()
- self.assertEqual(self.datasource.vendordata_raw,
- SERVER_CONTEXT['vendor_data']['cloudinit'])
+ self.assertEqual(
+ self.datasource.vendordata_raw,
+ SERVER_CONTEXT["vendor_data"]["cloudinit"],
+ )
def test_lack_of_vendor_data(self):
stripped_context = copy.deepcopy(SERVER_CONTEXT)
@@ -125,13 +133,13 @@ class DsLoads(test_helpers.TestCase):
def test_get_datasource_list_returns_in_local(self):
deps = (sources.DEP_FILESYSTEM,)
ds_list = DataSourceCloudSigma.get_datasource_list(deps)
- self.assertEqual(ds_list,
- [DataSourceCloudSigma.DataSourceCloudSigma])
+ self.assertEqual(ds_list, [DataSourceCloudSigma.DataSourceCloudSigma])
def test_list_sources_finds_ds(self):
found = sources.list_sources(
- ['CloudSigma'], (sources.DEP_FILESYSTEM,), ['cloudinit.sources'])
- self.assertEqual([DataSourceCloudSigma.DataSourceCloudSigma],
- found)
+ ["CloudSigma"], (sources.DEP_FILESYSTEM,), ["cloudinit.sources"]
+ )
+ self.assertEqual([DataSourceCloudSigma.DataSourceCloudSigma], found)
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_cloudstack.py b/tests/unittests/sources/test_cloudstack.py
index e68168f2..f7c69f91 100644
--- a/tests/unittests/test_datasource/test_cloudstack.py
+++ b/tests/unittests/sources/test_cloudstack.py
@@ -1,80 +1,90 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import helpers
-from cloudinit import util
-from cloudinit.sources.DataSourceCloudStack import (
- DataSourceCloudStack, get_latest_lease)
-
-from cloudinit.tests.helpers import CiTestCase, ExitStack, mock
-
import os
import time
-MOD_PATH = 'cloudinit.sources.DataSourceCloudStack'
-DS_PATH = MOD_PATH + '.DataSourceCloudStack'
+from cloudinit import helpers, util
+from cloudinit.sources.DataSourceCloudStack import (
+ DataSourceCloudStack,
+ get_latest_lease,
+)
+from tests.unittests.helpers import CiTestCase, ExitStack, mock
+MOD_PATH = "cloudinit.sources.DataSourceCloudStack"
+DS_PATH = MOD_PATH + ".DataSourceCloudStack"
-class TestCloudStackPasswordFetching(CiTestCase):
+class TestCloudStackPasswordFetching(CiTestCase):
def setUp(self):
super(TestCloudStackPasswordFetching, self).setUp()
self.patches = ExitStack()
self.addCleanup(self.patches.close)
mod_name = MOD_PATH
- self.patches.enter_context(mock.patch('{0}.ec2'.format(mod_name)))
- self.patches.enter_context(mock.patch('{0}.uhelp'.format(mod_name)))
+ self.patches.enter_context(mock.patch("{0}.ec2".format(mod_name)))
+ self.patches.enter_context(mock.patch("{0}.uhelp".format(mod_name)))
default_gw = "192.201.20.0"
get_latest_lease = mock.MagicMock(return_value=None)
- self.patches.enter_context(mock.patch(
- mod_name + '.get_latest_lease', get_latest_lease))
+ self.patches.enter_context(
+ mock.patch(mod_name + ".get_latest_lease", get_latest_lease)
+ )
get_default_gw = mock.MagicMock(return_value=default_gw)
- self.patches.enter_context(mock.patch(
- mod_name + '.get_default_gateway', get_default_gw))
+ self.patches.enter_context(
+ mock.patch(mod_name + ".get_default_gateway", get_default_gw)
+ )
get_networkd_server_address = mock.MagicMock(return_value=None)
- self.patches.enter_context(mock.patch(
- mod_name + '.dhcp.networkd_get_option_from_leases',
- get_networkd_server_address))
+ self.patches.enter_context(
+ mock.patch(
+ mod_name + ".dhcp.networkd_get_option_from_leases",
+ get_networkd_server_address,
+ )
+ )
self.tmp = self.tmp_dir()
def _set_password_server_response(self, response_string):
- subp = mock.MagicMock(return_value=(response_string, ''))
+ subp = mock.MagicMock(return_value=(response_string, ""))
self.patches.enter_context(
- mock.patch('cloudinit.sources.DataSourceCloudStack.subp.subp',
- subp))
+ mock.patch(
+ "cloudinit.sources.DataSourceCloudStack.subp.subp", subp
+ )
+ )
return subp
def test_empty_password_doesnt_create_config(self):
- self._set_password_server_response('')
+ self._set_password_server_response("")
ds = DataSourceCloudStack(
- {}, None, helpers.Paths({'run_dir': self.tmp}))
+ {}, None, helpers.Paths({"run_dir": self.tmp})
+ )
ds.get_data()
self.assertEqual({}, ds.get_config_obj())
def test_saved_password_doesnt_create_config(self):
- self._set_password_server_response('saved_password')
+ self._set_password_server_response("saved_password")
ds = DataSourceCloudStack(
- {}, None, helpers.Paths({'run_dir': self.tmp}))
+ {}, None, helpers.Paths({"run_dir": self.tmp})
+ )
ds.get_data()
self.assertEqual({}, ds.get_config_obj())
- @mock.patch(DS_PATH + '.wait_for_metadata_service')
+ @mock.patch(DS_PATH + ".wait_for_metadata_service")
def test_password_sets_password(self, m_wait):
m_wait.return_value = True
- password = 'SekritSquirrel'
+ password = "SekritSquirrel"
self._set_password_server_response(password)
ds = DataSourceCloudStack(
- {}, None, helpers.Paths({'run_dir': self.tmp}))
+ {}, None, helpers.Paths({"run_dir": self.tmp})
+ )
ds.get_data()
- self.assertEqual(password, ds.get_config_obj()['password'])
+ self.assertEqual(password, ds.get_config_obj()["password"])
- @mock.patch(DS_PATH + '.wait_for_metadata_service')
+ @mock.patch(DS_PATH + ".wait_for_metadata_service")
def test_bad_request_doesnt_stop_ds_from_working(self, m_wait):
m_wait.return_value = True
- self._set_password_server_response('bad_request')
+ self._set_password_server_response("bad_request")
ds = DataSourceCloudStack(
- {}, None, helpers.Paths({'run_dir': self.tmp}))
+ {}, None, helpers.Paths({"run_dir": self.tmp})
+ )
self.assertTrue(ds.get_data())
def assertRequestTypesSent(self, subp, expected_request_types):
@@ -82,42 +92,44 @@ class TestCloudStackPasswordFetching(CiTestCase):
for call in subp.call_args_list:
args = call[0][0]
for arg in args:
- if arg.startswith('DomU_Request'):
+ if arg.startswith("DomU_Request"):
request_types.append(arg.split()[1])
self.assertEqual(expected_request_types, request_types)
- @mock.patch(DS_PATH + '.wait_for_metadata_service')
+ @mock.patch(DS_PATH + ".wait_for_metadata_service")
def test_valid_response_means_password_marked_as_saved(self, m_wait):
m_wait.return_value = True
- password = 'SekritSquirrel'
+ password = "SekritSquirrel"
subp = self._set_password_server_response(password)
ds = DataSourceCloudStack(
- {}, None, helpers.Paths({'run_dir': self.tmp}))
+ {}, None, helpers.Paths({"run_dir": self.tmp})
+ )
ds.get_data()
- self.assertRequestTypesSent(subp,
- ['send_my_password', 'saved_password'])
+ self.assertRequestTypesSent(
+ subp, ["send_my_password", "saved_password"]
+ )
def _check_password_not_saved_for(self, response_string):
subp = self._set_password_server_response(response_string)
ds = DataSourceCloudStack(
- {}, None, helpers.Paths({'run_dir': self.tmp}))
- with mock.patch(DS_PATH + '.wait_for_metadata_service') as m_wait:
+ {}, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ with mock.patch(DS_PATH + ".wait_for_metadata_service") as m_wait:
m_wait.return_value = True
ds.get_data()
- self.assertRequestTypesSent(subp, ['send_my_password'])
+ self.assertRequestTypesSent(subp, ["send_my_password"])
def test_password_not_saved_if_empty(self):
- self._check_password_not_saved_for('')
+ self._check_password_not_saved_for("")
def test_password_not_saved_if_already_saved(self):
- self._check_password_not_saved_for('saved_password')
+ self._check_password_not_saved_for("saved_password")
def test_password_not_saved_if_bad_request(self):
- self._check_password_not_saved_for('bad_request')
+ self._check_password_not_saved_for("bad_request")
class TestGetLatestLease(CiTestCase):
-
def _populate_dir_list(self, bdir, files):
"""populate_dir_list([(name, data), (name, data)])
@@ -133,8 +145,9 @@ class TestGetLatestLease(CiTestCase):
def _pop_and_test(self, files, expected):
lease_d = self.tmp_dir()
self._populate_dir_list(lease_d, files)
- self.assertEqual(self.tmp_path(expected, lease_d),
- get_latest_lease(lease_d))
+ self.assertEqual(
+ self.tmp_path(expected, lease_d), get_latest_lease(lease_d)
+ )
def test_skips_dhcpv6_files(self):
"""files started with dhclient6 should be skipped."""
@@ -161,9 +174,15 @@ class TestGetLatestLease(CiTestCase):
def test_ignores_by_extension(self):
"""only .lease or .leases file should be considered."""
- self._pop_and_test(["dhclient.lease", "dhclient.lease.bk",
- "dhclient.lease-old", "dhclient.leaselease"],
- "dhclient.lease")
+ self._pop_and_test(
+ [
+ "dhclient.lease",
+ "dhclient.lease.bk",
+ "dhclient.lease-old",
+ "dhclient.leaselease",
+ ],
+ "dhclient.lease",
+ )
def test_selects_newest_matching(self):
"""If multiple files match, the newest written should be used."""
diff --git a/tests/unittests/sources/test_common.py b/tests/unittests/sources/test_common.py
new file mode 100644
index 00000000..a5bdb629
--- /dev/null
+++ b/tests/unittests/sources/test_common.py
@@ -0,0 +1,123 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import settings, sources, type_utils
+from cloudinit.sources import DataSource
+from cloudinit.sources import DataSourceAliYun as AliYun
+from cloudinit.sources import DataSourceAltCloud as AltCloud
+from cloudinit.sources import DataSourceAzure as Azure
+from cloudinit.sources import DataSourceBigstep as Bigstep
+from cloudinit.sources import DataSourceCloudSigma as CloudSigma
+from cloudinit.sources import DataSourceCloudStack as CloudStack
+from cloudinit.sources import DataSourceConfigDrive as ConfigDrive
+from cloudinit.sources import DataSourceDigitalOcean as DigitalOcean
+from cloudinit.sources import DataSourceEc2 as Ec2
+from cloudinit.sources import DataSourceExoscale as Exoscale
+from cloudinit.sources import DataSourceGCE as GCE
+from cloudinit.sources import DataSourceHetzner as Hetzner
+from cloudinit.sources import DataSourceIBMCloud as IBMCloud
+from cloudinit.sources import DataSourceLXD as LXD
+from cloudinit.sources import DataSourceMAAS as MAAS
+from cloudinit.sources import DataSourceNoCloud as NoCloud
+from cloudinit.sources import DataSourceNone as DSNone
+from cloudinit.sources import DataSourceOpenNebula as OpenNebula
+from cloudinit.sources import DataSourceOpenStack as OpenStack
+from cloudinit.sources import DataSourceOracle as Oracle
+from cloudinit.sources import DataSourceOVF as OVF
+from cloudinit.sources import DataSourceRbxCloud as RbxCloud
+from cloudinit.sources import DataSourceScaleway as Scaleway
+from cloudinit.sources import DataSourceSmartOS as SmartOS
+from cloudinit.sources import DataSourceUpCloud as UpCloud
+from cloudinit.sources import DataSourceVMware as VMware
+from cloudinit.sources import DataSourceVultr as Vultr
+from tests.unittests import helpers as test_helpers
+
+DEFAULT_LOCAL = [
+ Azure.DataSourceAzure,
+ CloudSigma.DataSourceCloudSigma,
+ ConfigDrive.DataSourceConfigDrive,
+ DigitalOcean.DataSourceDigitalOcean,
+ GCE.DataSourceGCELocal,
+ Hetzner.DataSourceHetzner,
+ IBMCloud.DataSourceIBMCloud,
+ LXD.DataSourceLXD,
+ NoCloud.DataSourceNoCloud,
+ OpenNebula.DataSourceOpenNebula,
+ Oracle.DataSourceOracle,
+ OVF.DataSourceOVF,
+ SmartOS.DataSourceSmartOS,
+ Vultr.DataSourceVultr,
+ Ec2.DataSourceEc2Local,
+ OpenStack.DataSourceOpenStackLocal,
+ RbxCloud.DataSourceRbxCloud,
+ Scaleway.DataSourceScaleway,
+ UpCloud.DataSourceUpCloudLocal,
+ VMware.DataSourceVMware,
+]
+
+DEFAULT_NETWORK = [
+ AliYun.DataSourceAliYun,
+ AltCloud.DataSourceAltCloud,
+ Bigstep.DataSourceBigstep,
+ CloudStack.DataSourceCloudStack,
+ DSNone.DataSourceNone,
+ Ec2.DataSourceEc2,
+ Exoscale.DataSourceExoscale,
+ GCE.DataSourceGCE,
+ MAAS.DataSourceMAAS,
+ NoCloud.DataSourceNoCloudNet,
+ OpenStack.DataSourceOpenStack,
+ OVF.DataSourceOVFNet,
+ UpCloud.DataSourceUpCloud,
+ VMware.DataSourceVMware,
+]
+
+
+class ExpectedDataSources(test_helpers.TestCase):
+ builtin_list = settings.CFG_BUILTIN["datasource_list"]
+ deps_local = [sources.DEP_FILESYSTEM]
+ deps_network = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK]
+ pkg_list = [type_utils.obj_name(sources)]
+
+ def test_expected_default_local_sources_found(self):
+ found = sources.list_sources(
+ self.builtin_list, self.deps_local, self.pkg_list
+ )
+ self.assertEqual(set(DEFAULT_LOCAL), set(found))
+
+ def test_expected_default_network_sources_found(self):
+ found = sources.list_sources(
+ self.builtin_list, self.deps_network, self.pkg_list
+ )
+ self.assertEqual(set(DEFAULT_NETWORK), set(found))
+
+ def test_expected_nondefault_network_sources_found(self):
+ found = sources.list_sources(
+ ["AliYun"], self.deps_network, self.pkg_list
+ )
+ self.assertEqual(set([AliYun.DataSourceAliYun]), set(found))
+
+
+class TestDataSourceInvariants(test_helpers.TestCase):
+ def test_data_sources_have_valid_network_config_sources(self):
+ for ds in DEFAULT_LOCAL + DEFAULT_NETWORK:
+ for cfg_src in ds.network_config_sources:
+ fail_msg = (
+ "{} has an invalid network_config_sources entry:"
+ " {}".format(str(ds), cfg_src)
+ )
+ self.assertTrue(
+ hasattr(sources.NetworkConfigSource, cfg_src), fail_msg
+ )
+
+ def test_expected_dsname_defined(self):
+ for ds in DEFAULT_LOCAL + DEFAULT_NETWORK:
+ fail_msg = (
+ "{} has an invalid / missing dsname property: {}".format(
+ str(ds), str(ds.dsname)
+ )
+ )
+ self.assertNotEqual(ds.dsname, DataSource.dsname, fail_msg)
+ self.assertIsNotNone(ds.dsname)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_configdrive.py b/tests/unittests/sources/test_configdrive.py
new file mode 100644
index 00000000..1fc40a0e
--- /dev/null
+++ b/tests/unittests/sources/test_configdrive.py
@@ -0,0 +1,1068 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import json
+import os
+from copy import copy, deepcopy
+
+from cloudinit import helpers, settings, util
+from cloudinit.net import eni, network_state
+from cloudinit.sources import DataSourceConfigDrive as ds
+from cloudinit.sources.helpers import openstack
+from tests.unittests.helpers import CiTestCase, ExitStack, mock, populate_dir
+
+PUBKEY = "ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n"
+EC2_META = {
+ "ami-id": "ami-00000001",
+ "ami-launch-index": 0,
+ "ami-manifest-path": "FIXME",
+ "block-device-mapping": {
+ "ami": "sda1",
+ "ephemeral0": "sda2",
+ "root": "/dev/sda1",
+ "swap": "sda3",
+ },
+ "hostname": "sm-foo-test.novalocal",
+ "instance-action": "none",
+ "instance-id": "i-00000001",
+ "instance-type": "m1.tiny",
+ "local-hostname": "sm-foo-test.novalocal",
+ "local-ipv4": None,
+ "placement": {"availability-zone": "nova"},
+ "public-hostname": "sm-foo-test.novalocal",
+ "public-ipv4": "",
+ "public-keys": {"0": {"openssh-key": PUBKEY}},
+ "reservation-id": "r-iru5qm4m",
+ "security-groups": ["default"],
+}
+USER_DATA = b"#!/bin/sh\necho This is user data\n"
+OSTACK_META = {
+ "availability_zone": "nova",
+ "files": [
+ {"content_path": "/content/0000", "path": "/etc/foo.cfg"},
+ {"content_path": "/content/0001", "path": "/etc/bar/bar.cfg"},
+ ],
+ "hostname": "sm-foo-test.novalocal",
+ "meta": {"dsmode": "local", "my-meta": "my-value"},
+ "name": "sm-foo-test",
+ "public_keys": {"mykey": PUBKEY},
+ "uuid": "b0fa911b-69d4-4476-bbe2-1c92bff6535c",
+}
+
+CONTENT_0 = b"This is contents of /etc/foo.cfg\n"
+CONTENT_1 = b"# this is /etc/bar/bar.cfg\n"
+NETWORK_DATA = {
+ "services": [
+ {"type": "dns", "address": "199.204.44.24"},
+ {"type": "dns", "address": "199.204.47.54"},
+ ],
+ "links": [
+ {
+ "vif_id": "2ecc7709-b3f7-4448-9580-e1ec32d75bbd",
+ "ethernet_mac_address": "fa:16:3e:69:b0:58",
+ "type": "ovs",
+ "mtu": None,
+ "id": "tap2ecc7709-b3",
+ },
+ {
+ "vif_id": "2f88d109-5b57-40e6-af32-2472df09dc33",
+ "ethernet_mac_address": "fa:16:3e:d4:57:ad",
+ "type": "ovs",
+ "mtu": None,
+ "id": "tap2f88d109-5b",
+ },
+ {
+ "vif_id": "1a5382f8-04c5-4d75-ab98-d666c1ef52cc",
+ "ethernet_mac_address": "fa:16:3e:05:30:fe",
+ "type": "ovs",
+ "mtu": None,
+ "id": "tap1a5382f8-04",
+ "name": "nic0",
+ },
+ ],
+ "networks": [
+ {
+ "link": "tap2ecc7709-b3",
+ "type": "ipv4_dhcp",
+ "network_id": "6d6357ac-0f70-4afa-8bd7-c274cc4ea235",
+ "id": "network0",
+ },
+ {
+ "link": "tap2f88d109-5b",
+ "type": "ipv4_dhcp",
+ "network_id": "d227a9b3-6960-4d94-8976-ee5788b44f54",
+ "id": "network1",
+ },
+ {
+ "link": "tap1a5382f8-04",
+ "type": "ipv4_dhcp",
+ "network_id": "dab2ba57-cae2-4311-a5ed-010b263891f5",
+ "id": "network2",
+ },
+ ],
+}
+
+NETWORK_DATA_2 = {
+ "services": [
+ {"type": "dns", "address": "1.1.1.191"},
+ {"type": "dns", "address": "1.1.1.4"},
+ ],
+ "networks": [
+ {
+ "network_id": "d94bbe94-7abc-48d4-9c82-4628ea26164a",
+ "type": "ipv4",
+ "netmask": "255.255.255.248",
+ "link": "eth0",
+ "routes": [
+ {
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": "2.2.2.9",
+ }
+ ],
+ "ip_address": "2.2.2.10",
+ "id": "network0-ipv4",
+ },
+ {
+ "network_id": "ca447c83-6409-499b-aaef-6ad1ae995348",
+ "type": "ipv4",
+ "netmask": "255.255.255.224",
+ "link": "eth1",
+ "routes": [],
+ "ip_address": "3.3.3.24",
+ "id": "network1-ipv4",
+ },
+ ],
+ "links": [
+ {
+ "ethernet_mac_address": "fa:16:3e:dd:50:9a",
+ "mtu": 1500,
+ "type": "vif",
+ "id": "eth0",
+ "vif_id": "vif-foo1",
+ },
+ {
+ "ethernet_mac_address": "fa:16:3e:a8:14:69",
+ "mtu": 1500,
+ "type": "vif",
+ "id": "eth1",
+ "vif_id": "vif-foo2",
+ },
+ ],
+}
+
+# This network data has 'tap' or null type for a link.
+NETWORK_DATA_3 = {
+ "services": [
+ {"type": "dns", "address": "172.16.36.11"},
+ {"type": "dns", "address": "172.16.36.12"},
+ ],
+ "networks": [
+ {
+ "network_id": "7c41450c-ba44-401a-9ab1-1604bb2da51e",
+ "type": "ipv4",
+ "netmask": "255.255.255.128",
+ "link": "tap77a0dc5b-72",
+ "ip_address": "172.17.48.18",
+ "id": "network0",
+ "routes": [
+ {
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": "172.17.48.1",
+ }
+ ],
+ },
+ {
+ "network_id": "7c41450c-ba44-401a-9ab1-1604bb2da51e",
+ "type": "ipv6",
+ "netmask": "ffff:ffff:ffff:ffff::",
+ "link": "tap77a0dc5b-72",
+ "ip_address": "fdb8:52d0:9d14:0:f816:3eff:fe9f:70d",
+ "id": "network1",
+ "routes": [
+ {
+ "netmask": "::",
+ "network": "::",
+ "gateway": "fdb8:52d0:9d14::1",
+ }
+ ],
+ },
+ {
+ "network_id": "1f53cb0e-72d3-47c7-94b9-ff4397c5fe54",
+ "type": "ipv4",
+ "netmask": "255.255.255.128",
+ "link": "tap7d6b7bec-93",
+ "ip_address": "172.16.48.13",
+ "id": "network2",
+ "routes": [
+ {
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": "172.16.48.1",
+ },
+ {
+ "netmask": "255.255.0.0",
+ "network": "172.16.0.0",
+ "gateway": "172.16.48.1",
+ },
+ ],
+ },
+ ],
+ "links": [
+ {
+ "ethernet_mac_address": "fa:16:3e:dd:50:9a",
+ "mtu": None,
+ "type": "tap",
+ "id": "tap77a0dc5b-72",
+ "vif_id": "77a0dc5b-720e-41b7-bfa7-1b2ff62e0d48",
+ },
+ {
+ "ethernet_mac_address": "fa:16:3e:a8:14:69",
+ "mtu": None,
+ "type": None,
+ "id": "tap7d6b7bec-93",
+ "vif_id": "7d6b7bec-93e6-4c03-869a-ddc5014892d5",
+ },
+ ],
+}
+
+BOND_MAC = "fa:16:3e:b3:72:36"
+NETWORK_DATA_BOND = {
+ "services": [
+ {"type": "dns", "address": "1.1.1.191"},
+ {"type": "dns", "address": "1.1.1.4"},
+ ],
+ "networks": [
+ {
+ "id": "network2-ipv4",
+ "ip_address": "2.2.2.13",
+ "link": "vlan2",
+ "netmask": "255.255.255.248",
+ "network_id": "4daf5ce8-38cf-4240-9f1a-04e86d7c6117",
+ "type": "ipv4",
+ "routes": [
+ {
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": "2.2.2.9",
+ }
+ ],
+ },
+ {
+ "id": "network3-ipv4",
+ "ip_address": "10.0.1.5",
+ "link": "vlan3",
+ "netmask": "255.255.255.248",
+ "network_id": "a9e2f47c-3c43-4782-94d0-e1eeef1c8c9d",
+ "type": "ipv4",
+ "routes": [
+ {
+ "netmask": "255.255.255.255",
+ "network": "192.168.1.0",
+ "gateway": "10.0.1.1",
+ }
+ ],
+ },
+ ],
+ "links": [
+ {
+ "ethernet_mac_address": "0c:c4:7a:34:6e:3c",
+ "id": "eth0",
+ "mtu": 1500,
+ "type": "phy",
+ },
+ {
+ "ethernet_mac_address": "0c:c4:7a:34:6e:3d",
+ "id": "eth1",
+ "mtu": 1500,
+ "type": "phy",
+ },
+ {
+ "bond_links": ["eth0", "eth1"],
+ "bond_miimon": 100,
+ "bond_mode": "4",
+ "bond_xmit_hash_policy": "layer3+4",
+ "ethernet_mac_address": BOND_MAC,
+ "id": "bond0",
+ "type": "bond",
+ },
+ {
+ "ethernet_mac_address": "fa:16:3e:b3:72:30",
+ "id": "vlan2",
+ "type": "vlan",
+ "vlan_id": 602,
+ "vlan_link": "bond0",
+ "vlan_mac_address": "fa:16:3e:b3:72:30",
+ },
+ {
+ "ethernet_mac_address": "fa:16:3e:66:ab:a6",
+ "id": "vlan3",
+ "type": "vlan",
+ "vlan_id": 612,
+ "vlan_link": "bond0",
+ "vlan_mac_address": "fa:16:3e:66:ab:a6",
+ },
+ ],
+}
+
+NETWORK_DATA_VLAN = {
+ "services": [{"type": "dns", "address": "1.1.1.191"}],
+ "networks": [
+ {
+ "id": "network1-ipv4",
+ "ip_address": "10.0.1.5",
+ "link": "vlan1",
+ "netmask": "255.255.255.248",
+ "network_id": "a9e2f47c-3c43-4782-94d0-e1eeef1c8c9d",
+ "type": "ipv4",
+ "routes": [
+ {
+ "netmask": "255.255.255.255",
+ "network": "192.168.1.0",
+ "gateway": "10.0.1.1",
+ }
+ ],
+ }
+ ],
+ "links": [
+ {
+ "ethernet_mac_address": "fa:16:3e:69:b0:58",
+ "id": "eth0",
+ "mtu": 1500,
+ "type": "phy",
+ },
+ {
+ "ethernet_mac_address": "fa:16:3e:b3:72:30",
+ "id": "vlan1",
+ "type": "vlan",
+ "vlan_id": 602,
+ "vlan_link": "eth0",
+ "vlan_mac_address": "fa:16:3e:b3:72:30",
+ },
+ ],
+}
+
+KNOWN_MACS = {
+ "fa:16:3e:69:b0:58": "enp0s1",
+ "fa:16:3e:d4:57:ad": "enp0s2",
+ "fa:16:3e:dd:50:9a": "foo1",
+ "fa:16:3e:a8:14:69": "foo2",
+ "fa:16:3e:ed:9a:59": "foo3",
+ "0c:c4:7a:34:6e:3d": "oeth1",
+ "0c:c4:7a:34:6e:3c": "oeth0",
+}
+
+CFG_DRIVE_FILES_V2 = {
+ "ec2/2009-04-04/meta-data.json": json.dumps(EC2_META),
+ "ec2/2009-04-04/user-data": USER_DATA,
+ "ec2/latest/meta-data.json": json.dumps(EC2_META),
+ "ec2/latest/user-data": USER_DATA,
+ "openstack/2012-08-10/meta_data.json": json.dumps(OSTACK_META),
+ "openstack/2012-08-10/user_data": USER_DATA,
+ "openstack/content/0000": CONTENT_0,
+ "openstack/content/0001": CONTENT_1,
+ "openstack/latest/meta_data.json": json.dumps(OSTACK_META),
+ "openstack/latest/user_data": USER_DATA,
+ "openstack/latest/network_data.json": json.dumps(NETWORK_DATA),
+ "openstack/2015-10-15/meta_data.json": json.dumps(OSTACK_META),
+ "openstack/2015-10-15/user_data": USER_DATA,
+ "openstack/2015-10-15/network_data.json": json.dumps(NETWORK_DATA),
+}
+
+M_PATH = "cloudinit.sources.DataSourceConfigDrive."
+
+
+class TestConfigDriveDataSource(CiTestCase):
+ def setUp(self):
+ super(TestConfigDriveDataSource, self).setUp()
+ self.add_patch(
+ M_PATH + "util.find_devs_with", "m_find_devs_with", return_value=[]
+ )
+ self.tmp = self.tmp_dir()
+
+ def test_ec2_metadata(self):
+ populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
+ found = ds.read_config_drive(self.tmp)
+ self.assertTrue("ec2-metadata" in found)
+ ec2_md = found["ec2-metadata"]
+ self.assertEqual(EC2_META, ec2_md)
+
+ def test_dev_os_remap(self):
+ populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
+ cfg_ds = ds.DataSourceConfigDrive(
+ settings.CFG_BUILTIN, None, helpers.Paths({})
+ )
+ found = ds.read_config_drive(self.tmp)
+ cfg_ds.metadata = found["metadata"]
+ name_tests = {
+ "ami": "/dev/vda1",
+ "root": "/dev/vda1",
+ "ephemeral0": "/dev/vda2",
+ "swap": "/dev/vda3",
+ }
+ for name, dev_name in name_tests.items():
+ with ExitStack() as mocks:
+ provided_name = dev_name[len("/dev/") :]
+ provided_name = "s" + provided_name[1:]
+ find_mock = mocks.enter_context(
+ mock.patch.object(
+ util, "find_devs_with", return_value=[provided_name]
+ )
+ )
+ # We want os.path.exists() to return False on its first call,
+ # and True on its second call. We use a handy generator as
+ # the mock side effect for this. The mocked function returns
+ # what the side effect returns.
+
+ def exists_side_effect():
+ yield False
+ yield True
+
+ exists_mock = mocks.enter_context(
+ mock.patch.object(
+ os.path, "exists", side_effect=exists_side_effect()
+ )
+ )
+ self.assertEqual(dev_name, cfg_ds.device_name_to_device(name))
+
+ find_mock.assert_called_once_with(mock.ANY)
+ self.assertEqual(exists_mock.call_count, 2)
+
+ def test_dev_os_map(self):
+ populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
+ cfg_ds = ds.DataSourceConfigDrive(
+ settings.CFG_BUILTIN, None, helpers.Paths({})
+ )
+ found = ds.read_config_drive(self.tmp)
+ os_md = found["metadata"]
+ cfg_ds.metadata = os_md
+ name_tests = {
+ "ami": "/dev/vda1",
+ "root": "/dev/vda1",
+ "ephemeral0": "/dev/vda2",
+ "swap": "/dev/vda3",
+ }
+ for name, dev_name in name_tests.items():
+ with ExitStack() as mocks:
+ find_mock = mocks.enter_context(
+ mock.patch.object(
+ util, "find_devs_with", return_value=[dev_name]
+ )
+ )
+ exists_mock = mocks.enter_context(
+ mock.patch.object(os.path, "exists", return_value=True)
+ )
+ self.assertEqual(dev_name, cfg_ds.device_name_to_device(name))
+
+ find_mock.assert_called_once_with(mock.ANY)
+ exists_mock.assert_called_once_with(mock.ANY)
+
+ def test_dev_ec2_remap(self):
+ populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
+ cfg_ds = ds.DataSourceConfigDrive(
+ settings.CFG_BUILTIN, None, helpers.Paths({})
+ )
+ found = ds.read_config_drive(self.tmp)
+ ec2_md = found["ec2-metadata"]
+ os_md = found["metadata"]
+ cfg_ds.ec2_metadata = ec2_md
+ cfg_ds.metadata = os_md
+ name_tests = {
+ "ami": "/dev/vda1",
+ "root": "/dev/vda1",
+ "ephemeral0": "/dev/vda2",
+ "swap": "/dev/vda3",
+ None: None,
+ "bob": None,
+ "root2k": None,
+ }
+ for name, dev_name in name_tests.items():
+ # We want os.path.exists() to return False on its first call,
+ # and True on its second call. We use a handy generator as
+ # the mock side effect for this. The mocked function returns
+ # what the side effect returns.
+ def exists_side_effect():
+ yield False
+ yield True
+
+ with mock.patch.object(
+ os.path, "exists", side_effect=exists_side_effect()
+ ):
+ self.assertEqual(dev_name, cfg_ds.device_name_to_device(name))
+ # We don't assert the call count for os.path.exists() because
+            # not all of the entries in name_tests result in two calls to
+ # that function. Specifically, 'root2k' doesn't seem to call
+ # it at all.
+
+ def test_dev_ec2_map(self):
+ populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
+ cfg_ds = ds.DataSourceConfigDrive(
+ settings.CFG_BUILTIN, None, helpers.Paths({})
+ )
+ found = ds.read_config_drive(self.tmp)
+ ec2_md = found["ec2-metadata"]
+ os_md = found["metadata"]
+ cfg_ds.ec2_metadata = ec2_md
+ cfg_ds.metadata = os_md
+ name_tests = {
+ "ami": "/dev/sda1",
+ "root": "/dev/sda1",
+ "ephemeral0": "/dev/sda2",
+ "swap": "/dev/sda3",
+ None: None,
+ "bob": None,
+ "root2k": None,
+ }
+ for name, dev_name in name_tests.items():
+ with mock.patch.object(os.path, "exists", return_value=True):
+ self.assertEqual(dev_name, cfg_ds.device_name_to_device(name))
+
+ def test_dir_valid(self):
+ """Verify a dir is read as such."""
+
+ populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
+
+ found = ds.read_config_drive(self.tmp)
+
+ expected_md = copy(OSTACK_META)
+ expected_md["instance-id"] = expected_md["uuid"]
+ expected_md["local-hostname"] = expected_md["hostname"]
+
+ self.assertEqual(USER_DATA, found["userdata"])
+ self.assertEqual(expected_md, found["metadata"])
+ self.assertEqual(NETWORK_DATA, found["networkdata"])
+ self.assertEqual(found["files"]["/etc/foo.cfg"], CONTENT_0)
+ self.assertEqual(found["files"]["/etc/bar/bar.cfg"], CONTENT_1)
+
+ def test_seed_dir_valid_extra(self):
+ """Verify extra files do not affect datasource validity."""
+
+ data = copy(CFG_DRIVE_FILES_V2)
+ data["myfoofile.txt"] = "myfoocontent"
+ data["openstack/latest/random-file.txt"] = "random-content"
+
+ populate_dir(self.tmp, data)
+
+ found = ds.read_config_drive(self.tmp)
+
+ expected_md = copy(OSTACK_META)
+ expected_md["instance-id"] = expected_md["uuid"]
+ expected_md["local-hostname"] = expected_md["hostname"]
+
+ self.assertEqual(expected_md, found["metadata"])
+
+ def test_seed_dir_bad_json_metadata(self):
+ """Verify that bad json in metadata raises BrokenConfigDriveDir."""
+ data = copy(CFG_DRIVE_FILES_V2)
+
+ data["openstack/2012-08-10/meta_data.json"] = "non-json garbage {}"
+ data["openstack/2015-10-15/meta_data.json"] = "non-json garbage {}"
+ data["openstack/latest/meta_data.json"] = "non-json garbage {}"
+
+ populate_dir(self.tmp, data)
+
+ self.assertRaises(
+ openstack.BrokenMetadata, ds.read_config_drive, self.tmp
+ )
+
+ def test_seed_dir_no_configdrive(self):
+ """Verify that no metadata raises NonConfigDriveDir."""
+
+ my_d = os.path.join(self.tmp, "non-configdrive")
+ data = copy(CFG_DRIVE_FILES_V2)
+ data["myfoofile.txt"] = "myfoocontent"
+ data["openstack/latest/random-file.txt"] = "random-content"
+ data["content/foo"] = "foocontent"
+
+ self.assertRaises(openstack.NonReadable, ds.read_config_drive, my_d)
+
+ def test_seed_dir_missing(self):
+ """Verify that missing seed_dir raises NonConfigDriveDir."""
+ my_d = os.path.join(self.tmp, "nonexistantdirectory")
+ self.assertRaises(openstack.NonReadable, ds.read_config_drive, my_d)
+
+ def test_find_candidates(self):
+ devs_with_answers = {}
+
+ def my_devs_with(*args, **kwargs):
+ criteria = args[0] if len(args) else kwargs.pop("criteria", None)
+ return devs_with_answers.get(criteria, [])
+
+ def my_is_partition(dev):
+ return dev[-1] in "0123456789" and not dev.startswith("sr")
+
+ try:
+ orig_find_devs_with = util.find_devs_with
+ util.find_devs_with = my_devs_with
+
+ orig_is_partition = util.is_partition
+ util.is_partition = my_is_partition
+
+ devs_with_answers = {
+ "TYPE=vfat": [],
+ "TYPE=iso9660": ["/dev/vdb"],
+ "LABEL=config-2": ["/dev/vdb"],
+ }
+ self.assertEqual(["/dev/vdb"], ds.find_candidate_devs())
+
+ # add a vfat item
+ # zdd reverse sorts after vdb, but config-2 label is preferred
+ devs_with_answers["TYPE=vfat"] = ["/dev/zdd"]
+ self.assertEqual(
+ ["/dev/vdb", "/dev/zdd"], ds.find_candidate_devs()
+ )
+
+ # verify that partitions are considered, that have correct label.
+ devs_with_answers = {
+ "TYPE=vfat": ["/dev/sda1"],
+ "TYPE=iso9660": [],
+ "LABEL=config-2": ["/dev/vdb3"],
+ }
+ self.assertEqual(["/dev/vdb3"], ds.find_candidate_devs())
+
+ # Verify that uppercase labels are also found.
+ devs_with_answers = {
+ "TYPE=vfat": [],
+ "TYPE=iso9660": ["/dev/vdb"],
+ "LABEL=CONFIG-2": ["/dev/vdb"],
+ }
+ self.assertEqual(["/dev/vdb"], ds.find_candidate_devs())
+
+ finally:
+ util.find_devs_with = orig_find_devs_with
+ util.is_partition = orig_is_partition
+
+ @mock.patch(M_PATH + "on_first_boot")
+ def test_pubkeys_v2(self, on_first_boot):
+ """Verify that public-keys work in config-drive-v2."""
+ myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2)
+ self.assertEqual(
+ myds.get_public_ssh_keys(), [OSTACK_META["public_keys"]["mykey"]]
+ )
+ self.assertEqual("configdrive", myds.cloud_name)
+ self.assertEqual("openstack", myds.platform)
+ self.assertEqual("seed-dir (%s/seed)" % self.tmp, myds.subplatform)
+
+ def test_subplatform_config_drive_when_starts_with_dev(self):
+ """subplatform reports config-drive when source starts with /dev/."""
+ cfg_ds = ds.DataSourceConfigDrive(
+ settings.CFG_BUILTIN, None, helpers.Paths({})
+ )
+ with mock.patch(M_PATH + "find_candidate_devs") as m_find_devs:
+ with mock.patch(M_PATH + "util.mount_cb"):
+ with mock.patch(M_PATH + "on_first_boot"):
+ m_find_devs.return_value = ["/dev/anything"]
+ self.assertEqual(True, cfg_ds.get_data())
+ self.assertEqual("config-disk (/dev/anything)", cfg_ds.subplatform)
+
+
+@mock.patch(
+ "cloudinit.net.is_openvswitch_internal_interface",
+ mock.Mock(return_value=False),
+)
+class TestNetJson(CiTestCase):
+ def setUp(self):
+ super(TestNetJson, self).setUp()
+ self.tmp = self.tmp_dir()
+ self.maxDiff = None
+
+ @mock.patch(M_PATH + "on_first_boot")
+ def test_network_data_is_found(self, on_first_boot):
+ """Verify that network_data is present in ds in config-drive-v2."""
+ myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2)
+ self.assertIsNotNone(myds.network_json)
+
+ @mock.patch(M_PATH + "on_first_boot")
+ def test_network_config_is_converted(self, on_first_boot):
+ """Verify that network_data is converted and present on ds object."""
+ myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2)
+ network_config = openstack.convert_net_json(
+ NETWORK_DATA, known_macs=KNOWN_MACS
+ )
+ self.assertEqual(myds.network_config, network_config)
+
+ def test_network_config_conversion_dhcp6(self):
+ """Test some ipv6 input network json and check the expected
+ conversions."""
+ in_data = {
+ "links": [
+ {
+ "vif_id": "2ecc7709-b3f7-4448-9580-e1ec32d75bbd",
+ "ethernet_mac_address": "fa:16:3e:69:b0:58",
+ "type": "ovs",
+ "mtu": None,
+ "id": "tap2ecc7709-b3",
+ },
+ {
+ "vif_id": "2f88d109-5b57-40e6-af32-2472df09dc33",
+ "ethernet_mac_address": "fa:16:3e:d4:57:ad",
+ "type": "ovs",
+ "mtu": None,
+ "id": "tap2f88d109-5b",
+ },
+ ],
+ "networks": [
+ {
+ "link": "tap2ecc7709-b3",
+ "type": "ipv6_dhcpv6-stateless",
+ "network_id": "6d6357ac-0f70-4afa-8bd7-c274cc4ea235",
+ "id": "network0",
+ },
+ {
+ "link": "tap2f88d109-5b",
+ "type": "ipv6_dhcpv6-stateful",
+ "network_id": "d227a9b3-6960-4d94-8976-ee5788b44f54",
+ "id": "network1",
+ },
+ ],
+ }
+ out_data = {
+ "version": 1,
+ "config": [
+ {
+ "mac_address": "fa:16:3e:69:b0:58",
+ "mtu": None,
+ "name": "enp0s1",
+ "subnets": [{"type": "ipv6_dhcpv6-stateless"}],
+ "type": "physical",
+ },
+ {
+ "mac_address": "fa:16:3e:d4:57:ad",
+ "mtu": None,
+ "name": "enp0s2",
+ "subnets": [{"type": "ipv6_dhcpv6-stateful"}],
+ "type": "physical",
+ "accept-ra": True,
+ },
+ ],
+ }
+ conv_data = openstack.convert_net_json(in_data, known_macs=KNOWN_MACS)
+ self.assertEqual(out_data, conv_data)
+
+ def test_network_config_conversions(self):
+ """Tests a bunch of input network json and checks the
+ expected conversions."""
+ in_datas = [
+ NETWORK_DATA,
+ {
+ "services": [{"type": "dns", "address": "172.19.0.12"}],
+ "networks": [
+ {
+ "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4",
+ "type": "ipv4",
+ "netmask": "255.255.252.0",
+ "link": "tap1a81968a-79",
+ "routes": [
+ {
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": "172.19.3.254",
+ }
+ ],
+ "ip_address": "172.19.1.34",
+ "id": "network0",
+ }
+ ],
+ "links": [
+ {
+ "type": "bridge",
+ "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f",
+ "ethernet_mac_address": "fa:16:3e:ed:9a:59",
+ "id": "tap1a81968a-79",
+ "mtu": None,
+ }
+ ],
+ },
+ ]
+ out_datas = [
+ {
+ "version": 1,
+ "config": [
+ {
+ "subnets": [{"type": "dhcp4"}],
+ "type": "physical",
+ "mac_address": "fa:16:3e:69:b0:58",
+ "name": "enp0s1",
+ "mtu": None,
+ },
+ {
+ "subnets": [{"type": "dhcp4"}],
+ "type": "physical",
+ "mac_address": "fa:16:3e:d4:57:ad",
+ "name": "enp0s2",
+ "mtu": None,
+ },
+ {
+ "subnets": [{"type": "dhcp4"}],
+ "type": "physical",
+ "mac_address": "fa:16:3e:05:30:fe",
+ "name": "nic0",
+ "mtu": None,
+ },
+ {
+ "type": "nameserver",
+ "address": "199.204.44.24",
+ },
+ {
+ "type": "nameserver",
+ "address": "199.204.47.54",
+ },
+ ],
+ },
+ {
+ "version": 1,
+ "config": [
+ {
+ "name": "foo3",
+ "mac_address": "fa:16:3e:ed:9a:59",
+ "mtu": None,
+ "type": "physical",
+ "subnets": [
+ {
+ "address": "172.19.1.34",
+ "netmask": "255.255.252.0",
+ "type": "static",
+ "ipv4": True,
+ "routes": [
+ {
+ "gateway": "172.19.3.254",
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ }
+ ],
+ }
+ ],
+ },
+ {
+ "type": "nameserver",
+ "address": "172.19.0.12",
+ },
+ ],
+ },
+ ]
+ for in_data, out_data in zip(in_datas, out_datas):
+ conv_data = openstack.convert_net_json(
+ in_data, known_macs=KNOWN_MACS
+ )
+ self.assertEqual(out_data, conv_data)
+
+
+@mock.patch(
+ "cloudinit.net.is_openvswitch_internal_interface",
+ mock.Mock(return_value=False),
+)
+class TestConvertNetworkData(CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestConvertNetworkData, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def _getnames_in_config(self, ncfg):
+ return set(
+ [n["name"] for n in ncfg["config"] if n["type"] == "physical"]
+ )
+
+ def test_conversion_fills_names(self):
+ ncfg = openstack.convert_net_json(NETWORK_DATA, known_macs=KNOWN_MACS)
+ expected = set(["nic0", "enp0s1", "enp0s2"])
+ found = self._getnames_in_config(ncfg)
+ self.assertEqual(found, expected)
+
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
+ def test_convert_reads_system_prefers_name(self, get_interfaces_by_mac):
+ macs = KNOWN_MACS.copy()
+ macs.update(
+ {"fa:16:3e:05:30:fe": "foonic1", "fa:16:3e:69:b0:58": "ens1"}
+ )
+ get_interfaces_by_mac.return_value = macs
+
+ ncfg = openstack.convert_net_json(NETWORK_DATA)
+ expected = set(["nic0", "ens1", "enp0s2"])
+ found = self._getnames_in_config(ncfg)
+ self.assertEqual(found, expected)
+
+ def test_convert_raises_value_error_on_missing_name(self):
+ macs = {"aa:aa:aa:aa:aa:00": "ens1"}
+ self.assertRaises(
+ ValueError,
+ openstack.convert_net_json,
+ NETWORK_DATA,
+ known_macs=macs,
+ )
+
+ def test_conversion_with_route(self):
+ ncfg = openstack.convert_net_json(
+ NETWORK_DATA_2, known_macs=KNOWN_MACS
+ )
+ # not the best test, but see that we get a route in the
+ # network config and that it gets rendered to an ENI file
+ routes = []
+ for n in ncfg["config"]:
+ for s in n.get("subnets", []):
+ routes.extend(s.get("routes", []))
+ self.assertIn(
+ {"network": "0.0.0.0", "netmask": "0.0.0.0", "gateway": "2.2.2.9"},
+ routes,
+ )
+ eni_renderer = eni.Renderer()
+ eni_renderer.render_network_state(
+ network_state.parse_net_config_data(ncfg), target=self.tmp
+ )
+ with open(
+ os.path.join(self.tmp, "etc", "network", "interfaces"), "r"
+ ) as f:
+ eni_rendering = f.read()
+ self.assertIn("route add default gw 2.2.2.9", eni_rendering)
+
+ def test_conversion_with_tap(self):
+ ncfg = openstack.convert_net_json(
+ NETWORK_DATA_3, known_macs=KNOWN_MACS
+ )
+ physicals = set()
+ for i in ncfg["config"]:
+ if i.get("type") == "physical":
+ physicals.add(i["name"])
+ self.assertEqual(physicals, set(("foo1", "foo2")))
+
+ def test_bond_conversion(self):
+ # light testing of bond conversion and eni rendering of bond
+ ncfg = openstack.convert_net_json(
+ NETWORK_DATA_BOND, known_macs=KNOWN_MACS
+ )
+ eni_renderer = eni.Renderer()
+
+ eni_renderer.render_network_state(
+ network_state.parse_net_config_data(ncfg), target=self.tmp
+ )
+ with open(
+ os.path.join(self.tmp, "etc", "network", "interfaces"), "r"
+ ) as f:
+ eni_rendering = f.read()
+
+ # Verify there are expected interfaces in the net config.
+ interfaces = sorted(
+ [
+ i["name"]
+ for i in ncfg["config"]
+ if i["type"] in ("vlan", "bond", "physical")
+ ]
+ )
+ self.assertEqual(
+ sorted(["oeth0", "oeth1", "bond0", "bond0.602", "bond0.612"]),
+ interfaces,
+ )
+
+ words = eni_rendering.split()
+        # 'eth0' and 'eth1' are the ids. Because their mac addresses
+ # map to other names, we should not see them in the ENI
+ self.assertNotIn("eth0", words)
+ self.assertNotIn("eth1", words)
+
+ # oeth0 and oeth1 are the interface names for eni.
+ # bond0 will be generated for the bond. Each should be auto.
+ self.assertIn("auto oeth0", eni_rendering)
+ self.assertIn("auto oeth1", eni_rendering)
+ self.assertIn("auto bond0", eni_rendering)
+ # The bond should have the given mac address
+ pos = eni_rendering.find("auto bond0")
+ self.assertIn(BOND_MAC, eni_rendering[pos:])
+
+ def test_vlan(self):
+ # light testing of vlan config conversion and eni rendering
+ ncfg = openstack.convert_net_json(
+ NETWORK_DATA_VLAN, known_macs=KNOWN_MACS
+ )
+ eni_renderer = eni.Renderer()
+ eni_renderer.render_network_state(
+ network_state.parse_net_config_data(ncfg), target=self.tmp
+ )
+ with open(
+ os.path.join(self.tmp, "etc", "network", "interfaces"), "r"
+ ) as f:
+ eni_rendering = f.read()
+
+ self.assertIn("iface enp0s1", eni_rendering)
+ self.assertIn("address 10.0.1.5", eni_rendering)
+ self.assertIn("auto enp0s1.602", eni_rendering)
+
+ def test_mac_addrs_can_be_upper_case(self):
+ # input mac addresses on rackspace may be upper case
+ my_netdata = deepcopy(NETWORK_DATA)
+ for link in my_netdata["links"]:
+ link["ethernet_mac_address"] = link["ethernet_mac_address"].upper()
+
+ ncfg = openstack.convert_net_json(my_netdata, known_macs=KNOWN_MACS)
+ config_name2mac = {}
+ for n in ncfg["config"]:
+ if n["type"] == "physical":
+ config_name2mac[n["name"]] = n["mac_address"]
+
+ expected = {
+ "nic0": "fa:16:3e:05:30:fe",
+ "enp0s1": "fa:16:3e:69:b0:58",
+ "enp0s2": "fa:16:3e:d4:57:ad",
+ }
+ self.assertEqual(expected, config_name2mac)
+
+ def test_unknown_device_types_accepted(self):
+ # If we don't recognise a link, we should treat it as physical for a
+ # best-effort boot
+ my_netdata = deepcopy(NETWORK_DATA)
+ my_netdata["links"][0]["type"] = "my-special-link-type"
+
+ ncfg = openstack.convert_net_json(my_netdata, known_macs=KNOWN_MACS)
+ config_name2mac = {}
+ for n in ncfg["config"]:
+ if n["type"] == "physical":
+ config_name2mac[n["name"]] = n["mac_address"]
+
+ expected = {
+ "nic0": "fa:16:3e:05:30:fe",
+ "enp0s1": "fa:16:3e:69:b0:58",
+ "enp0s2": "fa:16:3e:d4:57:ad",
+ }
+ self.assertEqual(expected, config_name2mac)
+
+ # We should, however, warn the user that we don't recognise the type
+ self.assertIn(
+ "Unknown network_data link type (my-special-link-type)",
+ self.logs.getvalue(),
+ )
+
+
+def cfg_ds_from_dir(base_d, files=None):
+ run = os.path.join(base_d, "run")
+ os.mkdir(run)
+ cfg_ds = ds.DataSourceConfigDrive(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": run})
+ )
+ cfg_ds.seed_dir = os.path.join(base_d, "seed")
+ if files:
+ populate_dir(cfg_ds.seed_dir, files)
+ cfg_ds.known_macs = KNOWN_MACS.copy()
+ if not cfg_ds.get_data():
+ raise RuntimeError(
+ "Data source did not extract itself from seed directory %s"
+ % cfg_ds.seed_dir
+ )
+ return cfg_ds
+
+
+def populate_ds_from_read_config(cfg_ds, source, results):
+ """Patch the DataSourceConfigDrive from the results of
+ read_config_drive_dir hopefully in line with what it would have
+ if cfg_ds.get_data had been successfully called"""
+ cfg_ds.source = source
+ cfg_ds.metadata = results.get("metadata")
+ cfg_ds.ec2_metadata = results.get("ec2-metadata")
+ cfg_ds.userdata_raw = results.get("userdata")
+ cfg_ds.version = results.get("version")
+ cfg_ds.network_json = results.get("networkdata")
+ cfg_ds._network_config = openstack.convert_net_json(
+ cfg_ds.network_json, known_macs=KNOWN_MACS
+ )
+
+
+# vi: ts=4 expandtab
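
For context (this sketch is not part of the patch above): the comments in test_dev_os_remap and test_dev_ec2_remap rely on mock's support for iterable side effects, where each call to the patched function consumes the next yielded value. A minimal, self-contained sketch of that pattern using only the standard library:

    import os.path
    from unittest import mock

    def exists_side_effect():
        # Each call to the patched os.path.exists() consumes the next value,
        # so the first lookup "fails" and the second one "succeeds".
        yield False
        yield True

    with mock.patch.object(os.path, "exists", side_effect=exists_side_effect()):
        print(os.path.exists("/dev/sda1"))  # False (first call)
        print(os.path.exists("/dev/vda1"))  # True  (second call)
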
diff --git a/tests/unittests/sources/test_digitalocean.py b/tests/unittests/sources/test_digitalocean.py
new file mode 100644
index 00000000..f3e6224e
--- /dev/null
+++ b/tests/unittests/sources/test_digitalocean.py
@@ -0,0 +1,389 @@
+# Copyright (C) 2014 Neal Shrader
+#
+# Author: Neal Shrader <neal@digitalocean.com>
+# Author: Ben Howard <bh@digitalocean.com>
+# Author: Scott Moser <smoser@ubuntu.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import json
+
+from cloudinit import helpers, settings
+from cloudinit.sources import DataSourceDigitalOcean
+from cloudinit.sources.helpers import digitalocean
+from tests.unittests.helpers import CiTestCase, mock
+
+DO_MULTIPLE_KEYS = [
+ "ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@do.co",
+ "ssh-rsa AAAAB3NzaC1yc2EAAAA... test2@do.co",
+]
+DO_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... test@do.co"
+
+# the following JSON was taken from droplet (that's why it's a string)
+DO_META = json.loads(
+ """
+{
+ "droplet_id": "22532410",
+ "hostname": "utl-96268",
+ "vendor_data": "vendordata goes here",
+ "user_data": "userdata goes here",
+ "public_keys": "",
+ "auth_key": "authorization_key",
+ "region": "nyc3",
+ "interfaces": {
+ "private": [
+ {
+ "ipv4": {
+ "ip_address": "10.132.6.205",
+ "netmask": "255.255.0.0",
+ "gateway": "10.132.0.1"
+ },
+ "mac": "04:01:57:d1:9e:02",
+ "type": "private"
+ }
+ ],
+ "public": [
+ {
+ "ipv4": {
+ "ip_address": "192.0.0.20",
+ "netmask": "255.255.255.0",
+ "gateway": "104.236.0.1"
+ },
+ "ipv6": {
+ "ip_address": "2604:A880:0800:0000:1000:0000:0000:0000",
+ "cidr": 64,
+ "gateway": "2604:A880:0800:0000:0000:0000:0000:0001"
+ },
+ "anchor_ipv4": {
+ "ip_address": "10.0.0.5",
+ "netmask": "255.255.0.0",
+ "gateway": "10.0.0.1"
+ },
+ "mac": "04:01:57:d1:9e:01",
+ "type": "public"
+ }
+ ]
+ },
+ "floating_ip": {
+ "ipv4": {
+ "active": false
+ }
+ },
+ "dns": {
+ "nameservers": [
+ "2001:4860:4860::8844",
+ "2001:4860:4860::8888",
+ "8.8.8.8"
+ ]
+ }
+}
+"""
+)
+
+# This has no private interface
+DO_META_2 = {
+ "droplet_id": 27223699,
+ "hostname": "smtest1",
+ "vendor_data": "\n".join(
+ [
+ '"Content-Type: multipart/mixed; '
+ 'boundary="===============8645434374073493512=="',
+ "MIME-Version: 1.0",
+ "",
+ "--===============8645434374073493512==",
+ "MIME-Version: 1.0"
+ 'Content-Type: text/cloud-config; charset="us-ascii"'
+ "Content-Transfer-Encoding: 7bit"
+ 'Content-Disposition: attachment; filename="cloud-config"'
+ "",
+ "#cloud-config",
+ "disable_root: false",
+ "manage_etc_hosts: true",
+ "",
+ "",
+ "--===============8645434374073493512==",
+ ]
+ ),
+ "public_keys": ["ssh-rsa AAAAB3NzaN...N3NtHw== smoser@brickies"],
+ "auth_key": "88888888888888888888888888888888",
+ "region": "nyc3",
+ "interfaces": {
+ "public": [
+ {
+ "ipv4": {
+ "ip_address": "45.55.249.133",
+ "netmask": "255.255.192.0",
+ "gateway": "45.55.192.1",
+ },
+ "anchor_ipv4": {
+ "ip_address": "10.17.0.5",
+ "netmask": "255.255.0.0",
+ "gateway": "10.17.0.1",
+ },
+ "mac": "ae:cc:08:7c:88:00",
+ "type": "public",
+ }
+ ]
+ },
+ "floating_ip": {"ipv4": {"active": True, "ip_address": "138.197.59.92"}},
+ "dns": {"nameservers": ["8.8.8.8", "8.8.4.4"]},
+ "tags": None,
+}
+
+DO_META["public_keys"] = DO_SINGLE_KEY
+
+MD_URL = "http://169.254.169.254/metadata/v1.json"
+
+
+def _mock_dmi():
+ return (True, DO_META.get("id"))
+
+
+class TestDataSourceDigitalOcean(CiTestCase):
+ """
+ Test reading the meta-data
+ """
+
+ def setUp(self):
+ super(TestDataSourceDigitalOcean, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def get_ds(self, get_sysinfo=_mock_dmi):
+ ds = DataSourceDigitalOcean.DataSourceDigitalOcean(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ ds.use_ip4LL = False
+ if get_sysinfo is not None:
+ ds._get_sysinfo = get_sysinfo
+ return ds
+
+ @mock.patch("cloudinit.sources.helpers.digitalocean.read_sysinfo")
+ def test_returns_false_not_on_docean(self, m_read_sysinfo):
+ m_read_sysinfo.return_value = (False, None)
+ ds = self.get_ds(get_sysinfo=None)
+ self.assertEqual(False, ds.get_data())
+ self.assertTrue(m_read_sysinfo.called)
+
+ @mock.patch("cloudinit.sources.helpers.digitalocean.read_metadata")
+ def test_metadata(self, mock_readmd):
+ mock_readmd.return_value = DO_META.copy()
+
+ ds = self.get_ds()
+ ret = ds.get_data()
+ self.assertTrue(ret)
+
+ self.assertTrue(mock_readmd.called)
+
+ self.assertEqual(DO_META.get("user_data"), ds.get_userdata_raw())
+ self.assertEqual(DO_META.get("vendor_data"), ds.get_vendordata_raw())
+ self.assertEqual(DO_META.get("region"), ds.availability_zone)
+ self.assertEqual(DO_META.get("droplet_id"), ds.get_instance_id())
+ self.assertEqual(DO_META.get("hostname"), ds.get_hostname())
+
+ # Single key
+ self.assertEqual(
+ [DO_META.get("public_keys")], ds.get_public_ssh_keys()
+ )
+
+ self.assertIsInstance(ds.get_public_ssh_keys(), list)
+
+ @mock.patch("cloudinit.sources.helpers.digitalocean.read_metadata")
+ def test_multiple_ssh_keys(self, mock_readmd):
+ metadata = DO_META.copy()
+ metadata["public_keys"] = DO_MULTIPLE_KEYS
+ mock_readmd.return_value = metadata.copy()
+
+ ds = self.get_ds()
+ ret = ds.get_data()
+ self.assertTrue(ret)
+
+ self.assertTrue(mock_readmd.called)
+
+ # Multiple keys
+ self.assertEqual(metadata["public_keys"], ds.get_public_ssh_keys())
+ self.assertIsInstance(ds.get_public_ssh_keys(), list)
+
+
+class TestNetworkConvert(CiTestCase):
+ def _get_networking(self):
+ self.m_get_by_mac.return_value = {
+ "04:01:57:d1:9e:01": "ens1",
+ "04:01:57:d1:9e:02": "ens2",
+ "b8:ae:ed:75:5f:9a": "enp0s25",
+ "ae:cc:08:7c:88:00": "meta2p1",
+ }
+ netcfg = digitalocean.convert_network_configuration(
+ DO_META["interfaces"], DO_META["dns"]["nameservers"]
+ )
+ self.assertIn("config", netcfg)
+ return netcfg
+
+ def setUp(self):
+ super(TestNetworkConvert, self).setUp()
+ self.add_patch("cloudinit.net.get_interfaces_by_mac", "m_get_by_mac")
+
+ def test_networking_defined(self):
+ netcfg = self._get_networking()
+ self.assertIsNotNone(netcfg)
+ dns_defined = False
+
+ for part in netcfg.get("config"):
+ n_type = part.get("type")
+ print("testing part ", n_type, "\n", json.dumps(part, indent=3))
+
+ if n_type == "nameserver":
+ n_address = part.get("address")
+ self.assertIsNotNone(n_address)
+ self.assertEqual(len(n_address), 3)
+
+ dns_resolvers = DO_META["dns"]["nameservers"]
+ for x in n_address:
+ self.assertIn(x, dns_resolvers)
+ dns_defined = True
+
+ else:
+ n_subnets = part.get("type")
+ n_name = part.get("name")
+ n_mac = part.get("mac_address")
+
+ self.assertIsNotNone(n_type)
+ self.assertIsNotNone(n_subnets)
+ self.assertIsNotNone(n_name)
+ self.assertIsNotNone(n_mac)
+
+ self.assertTrue(dns_defined)
+
+ def _get_nic_definition(self, int_type, expected_name):
+ """helper function to return if_type (i.e. public) and the expected
+        name used by cloud-init (i.e. eth0)"""
+ netcfg = self._get_networking()
+ meta_def = (DO_META.get("interfaces")).get(int_type)[0]
+
+ self.assertEqual(int_type, meta_def.get("type"))
+
+ for nic_def in netcfg.get("config"):
+ print(nic_def)
+ if nic_def.get("name") == expected_name:
+ return nic_def, meta_def
+
+ def _get_match_subn(self, subnets, ip_addr):
+ """get the matching subnet definition based on ip address"""
+ for subn in subnets:
+ address = subn.get("address")
+ self.assertIsNotNone(address)
+
+ # equals won't work because of ipv6 addressing being in
+            # cidr notation, i.e. fe00::1/64
+ if ip_addr in address:
+ print(json.dumps(subn, indent=3))
+ return subn
+
+ def test_correct_gateways_defined(self):
+ """test to make sure the eth0 ipv4 and ipv6 gateways are defined"""
+ netcfg = self._get_networking()
+ gateways = []
+ for nic_def in netcfg.get("config"):
+ if nic_def.get("type") != "physical":
+ continue
+ for subn in nic_def.get("subnets"):
+ if "gateway" in subn:
+ gateways.append(subn.get("gateway"))
+
+ # we should have two gateways, one ipv4 and ipv6
+ self.assertEqual(len(gateways), 2)
+
+        # make sure the ipv4 gateway is there
+ (nic_def, meta_def) = self._get_nic_definition("public", "eth0")
+ ipv4_def = meta_def.get("ipv4")
+ self.assertIn(ipv4_def.get("gateway"), gateways)
+
+        # make sure the ipv6 gateway is there
+ ipv6_def = meta_def.get("ipv6")
+ self.assertIn(ipv6_def.get("gateway"), gateways)
+
+ def test_public_interface_defined(self):
+ """test that the public interface is defined as eth0"""
+ (nic_def, meta_def) = self._get_nic_definition("public", "eth0")
+ self.assertEqual("eth0", nic_def.get("name"))
+ self.assertEqual(meta_def.get("mac"), nic_def.get("mac_address"))
+ self.assertEqual("physical", nic_def.get("type"))
+
+ def test_private_interface_defined(self):
+ """test that the private interface is defined as eth1"""
+ (nic_def, meta_def) = self._get_nic_definition("private", "eth1")
+ self.assertEqual("eth1", nic_def.get("name"))
+ self.assertEqual(meta_def.get("mac"), nic_def.get("mac_address"))
+ self.assertEqual("physical", nic_def.get("type"))
+
+ def test_public_interface_ipv6(self):
+ """test public ipv6 addressing"""
+ (nic_def, meta_def) = self._get_nic_definition("public", "eth0")
+ ipv6_def = meta_def.get("ipv6")
+ self.assertIsNotNone(ipv6_def)
+
+ subn_def = self._get_match_subn(
+ nic_def.get("subnets"), ipv6_def.get("ip_address")
+ )
+
+ cidr_notated_address = "{0}/{1}".format(
+ ipv6_def.get("ip_address"), ipv6_def.get("cidr")
+ )
+
+ self.assertEqual(cidr_notated_address, subn_def.get("address"))
+ self.assertEqual(ipv6_def.get("gateway"), subn_def.get("gateway"))
+
+ def test_public_interface_ipv4(self):
+ """test public ipv4 addressing"""
+ (nic_def, meta_def) = self._get_nic_definition("public", "eth0")
+ ipv4_def = meta_def.get("ipv4")
+ self.assertIsNotNone(ipv4_def)
+
+ subn_def = self._get_match_subn(
+ nic_def.get("subnets"), ipv4_def.get("ip_address")
+ )
+
+ self.assertEqual(ipv4_def.get("netmask"), subn_def.get("netmask"))
+ self.assertEqual(ipv4_def.get("gateway"), subn_def.get("gateway"))
+
+ def test_public_interface_anchor_ipv4(self):
+ """test public ipv4 addressing"""
+ (nic_def, meta_def) = self._get_nic_definition("public", "eth0")
+ ipv4_def = meta_def.get("anchor_ipv4")
+ self.assertIsNotNone(ipv4_def)
+
+ subn_def = self._get_match_subn(
+ nic_def.get("subnets"), ipv4_def.get("ip_address")
+ )
+
+ self.assertEqual(ipv4_def.get("netmask"), subn_def.get("netmask"))
+ self.assertNotIn("gateway", subn_def)
+
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
+ def test_convert_without_private(self, m_get_by_mac):
+ m_get_by_mac.return_value = {
+ "b8:ae:ed:75:5f:9a": "enp0s25",
+ "ae:cc:08:7c:88:00": "meta2p1",
+ }
+ netcfg = digitalocean.convert_network_configuration(
+ DO_META_2["interfaces"], DO_META_2["dns"]["nameservers"]
+ )
+
+ # print(netcfg)
+ byname = {}
+ for i in netcfg["config"]:
+ if "name" in i:
+ if i["name"] in byname:
+ raise ValueError(
+ "name '%s' in config twice: %s" % (i["name"], netcfg)
+ )
+ byname[i["name"]] = i
+ self.assertTrue("eth0" in byname)
+ self.assertTrue("subnets" in byname["eth0"])
+ eth0 = byname["eth0"]
+ self.assertEqual(
+ sorted(["45.55.249.133", "10.17.0.5"]),
+ sorted([i["address"] for i in eth0["subnets"]]),
+ )
+
+
+# vi: ts=4 expandtab
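
For context (not part of the patch): _get_match_subn above matches with `in` rather than `==` because the converted IPv6 subnet address carries a CIDR suffix while the metadata IP does not. A minimal sketch of that comparison, reusing the IPv6 address and cidr from DO_META:

    ipv6_ip = "2604:A880:0800:0000:1000:0000:0000:0000"
    subnet_address = ipv6_ip + "/64"   # how the converted subnet stores it

    print(ipv6_ip == subnet_address)   # False: the /64 suffix breaks equality
    print(ipv6_ip in subnet_address)   # True: substring match still pairs them
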
diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/sources/test_ec2.py
index a93f2195..b376660d 100644
--- a/tests/unittests/test_datasource/test_ec2.py
+++ b/tests/unittests/sources/test_ec2.py
@@ -1,35 +1,37 @@
# This file is part of cloud-init. See LICENSE file for license information.
import copy
-import httpretty
import json
-import requests
from unittest import mock
+import httpretty
+import requests
+
from cloudinit import helpers
from cloudinit.sources import DataSourceEc2 as ec2
-from cloudinit.tests import helpers as test_helpers
-
+from tests.unittests import helpers as test_helpers
DYNAMIC_METADATA = {
"instance-identity": {
- "document": json.dumps({
- "devpayProductCodes": None,
- "marketplaceProductCodes": ["1abc2defghijklm3nopqrs4tu"],
- "availabilityZone": "us-west-2b",
- "privateIp": "10.158.112.84",
- "version": "2017-09-30",
- "instanceId": "my-identity-id",
- "billingProducts": None,
- "instanceType": "t2.micro",
- "accountId": "123456789012",
- "imageId": "ami-5fb8c835",
- "pendingTime": "2016-11-19T16:32:11Z",
- "architecture": "x86_64",
- "kernelId": None,
- "ramdiskId": None,
- "region": "us-west-2"
- })
+ "document": json.dumps(
+ {
+ "devpayProductCodes": None,
+ "marketplaceProductCodes": ["1abc2defghijklm3nopqrs4tu"],
+ "availabilityZone": "us-west-2b",
+ "privateIp": "10.158.112.84",
+ "version": "2017-09-30",
+ "instanceId": "my-identity-id",
+ "billingProducts": None,
+ "instanceType": "t2.micro",
+ "accountId": "123456789012",
+ "imageId": "ami-5fb8c835",
+ "pendingTime": "2016-11-19T16:32:11Z",
+ "architecture": "x86_64",
+ "kernelId": None,
+ "ramdiskId": None,
+ "region": "us-west-2",
+ }
+ )
}
}
@@ -52,7 +54,7 @@ DEFAULT_METADATA = {
"local-hostname": "ip-172-3-3-15.us-east-2.compute.internal",
"local-ipv4": "172.3.3.15",
"mac": "06:17:04:d7:26:09",
- "metrics": {"vhostmd": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"},
+ "metrics": {"vhostmd": '<?xml version="1.0" encoding="UTF-8"?>'},
"network": {
"interfaces": {
"macs": {
@@ -61,13 +63,15 @@ DEFAULT_METADATA = {
"interface-id": "eni-e44ef49e",
"ipv4-associations": {"13.59.77.202": "172.3.3.15"},
"ipv6s": "2600:1f16:aeb:b20b:9d87:a4af:5cc9:73dc",
- "local-hostname": ("ip-172-3-3-15.us-east-2."
- "compute.internal"),
+ "local-hostname": (
+ "ip-172-3-3-15.us-east-2.compute.internal"
+ ),
"local-ipv4s": "172.3.3.15",
"mac": "06:17:04:d7:26:09",
"owner-id": "950047163771",
- "public-hostname": ("ec2-13-59-77-202.us-east-2."
- "compute.amazonaws.com"),
+ "public-hostname": (
+ "ec2-13-59-77-202.us-east-2.compute.amazonaws.com"
+ ),
"public-ipv4s": "13.59.77.202",
"security-group-ids": "sg-5a61d333",
"security-groups": "wide-open",
@@ -77,20 +81,22 @@ DEFAULT_METADATA = {
"vpc-id": "vpc-87e72bee",
"vpc-ipv4-cidr-block": "172.31.0.0/16",
"vpc-ipv4-cidr-blocks": "172.31.0.0/16",
- "vpc-ipv6-cidr-blocks": "2600:1f16:aeb:b200::/56"
+ "vpc-ipv6-cidr-blocks": "2600:1f16:aeb:b200::/56",
},
"06:17:04:d7:26:08": {
- "device-number": "1", # Only IPv4 local config
+ "device-number": "1", # Only IPv4 local config
"interface-id": "eni-e44ef49f",
"ipv4-associations": {"": "172.3.3.16"},
"ipv6s": "", # No IPv6 config
- "local-hostname": ("ip-172-3-3-16.us-east-2."
- "compute.internal"),
+ "local-hostname": (
+ "ip-172-3-3-16.us-east-2.compute.internal"
+ ),
"local-ipv4s": "172.3.3.16",
"mac": "06:17:04:d7:26:08",
"owner-id": "950047163771",
- "public-hostname": ("ec2-172-3-3-16.us-east-2."
- "compute.amazonaws.com"),
+ "public-hostname": (
+ "ec2-172-3-3-16.us-east-2.compute.amazonaws.com"
+ ),
"public-ipv4s": "", # No public ipv4 config
"security-group-ids": "sg-5a61d333",
"security-groups": "wide-open",
@@ -100,8 +106,8 @@ DEFAULT_METADATA = {
"vpc-id": "vpc-87e72bee",
"vpc-ipv4-cidr-block": "172.31.0.0/16",
"vpc-ipv4-cidr-blocks": "172.31.0.0/16",
- "vpc-ipv6-cidr-blocks": ""
- }
+ "vpc-ipv6-cidr-blocks": "",
+ },
}
}
},
@@ -123,24 +129,17 @@ DEFAULT_METADATA = {
NIC1_MD_IPV4_IPV6_MULTI_IP = {
"device-number": "0",
"interface-id": "eni-0d6335689899ce9cc",
- "ipv4-associations": {
- "18.218.219.181": "172.31.44.13"
- },
+ "ipv4-associations": {"18.218.219.181": "172.31.44.13"},
"ipv6s": [
"2600:1f16:292:100:c187:593c:4349:136",
"2600:1f16:292:100:f153:12a3:c37c:11f9",
- "2600:1f16:292:100:f152:2222:3333:4444"
- ],
- "local-hostname": ("ip-172-31-44-13.us-east-2."
- "compute.internal"),
- "local-ipv4s": [
- "172.31.44.13",
- "172.31.45.70"
+ "2600:1f16:292:100:f152:2222:3333:4444",
],
+ "local-hostname": "ip-172-31-44-13.us-east-2.compute.internal",
+ "local-ipv4s": ["172.31.44.13", "172.31.45.70"],
"mac": "0a:07:84:3d:6e:38",
"owner-id": "329910648901",
- "public-hostname": ("ec2-18-218-219-181.us-east-2."
- "compute.amazonaws.com"),
+ "public-hostname": "ec2-18-218-219-181.us-east-2.compute.amazonaws.com",
"public-ipv4s": "18.218.219.181",
"security-group-ids": "sg-0c387755222ba8d2e",
"security-groups": "launch-wizard-4",
@@ -150,7 +149,7 @@ NIC1_MD_IPV4_IPV6_MULTI_IP = {
"vpc-id": "vpc-a07f62c8",
"vpc-ipv4-cidr-block": "172.31.0.0/16",
"vpc-ipv4-cidr-blocks": "172.31.0.0/16",
- "vpc_ipv6_cidr_blocks": "2600:1f16:292:100::/56"
+ "vpc_ipv6_cidr_blocks": "2600:1f16:292:100::/56",
}
NIC2_MD = {
@@ -166,30 +165,22 @@ NIC2_MD = {
"subnet-ipv4-cidr-block": "172.31.32.0/20",
"vpc-id": "vpc-a07f62c8",
"vpc-ipv4-cidr-block": "172.31.0.0/16",
- "vpc-ipv4-cidr-blocks": "172.31.0.0/16"
+ "vpc-ipv4-cidr-blocks": "172.31.0.0/16",
}
SECONDARY_IP_METADATA_2018_09_24 = {
"ami-id": "ami-0986c2ac728528ac2",
"ami-launch-index": "0",
"ami-manifest-path": "(unknown)",
- "block-device-mapping": {
- "ami": "/dev/sda1",
- "root": "/dev/sda1"
- },
- "events": {
- "maintenance": {
- "history": "[]",
- "scheduled": "[]"
- }
- },
+ "block-device-mapping": {"ami": "/dev/sda1", "root": "/dev/sda1"},
+ "events": {"maintenance": {"history": "[]", "scheduled": "[]"}},
"hostname": "ip-172-31-44-13.us-east-2.compute.internal",
"identity-credentials": {
"ec2": {
"info": {
"AccountId": "329910648901",
"Code": "Success",
- "LastUpdated": "2019-07-06T14:22:56Z"
+ "LastUpdated": "2019-07-06T14:22:56Z",
}
}
},
@@ -199,9 +190,7 @@ SECONDARY_IP_METADATA_2018_09_24 = {
"local-hostname": "ip-172-31-44-13.us-east-2.compute.internal",
"local-ipv4": "172.31.44.13",
"mac": "0a:07:84:3d:6e:38",
- "metrics": {
- "vhostmd": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
- },
+ "metrics": {"vhostmd": '<?xml version="1.0" encoding="UTF-8"?>'},
"network": {
"interfaces": {
"macs": {
@@ -209,27 +198,17 @@ SECONDARY_IP_METADATA_2018_09_24 = {
}
}
},
- "placement": {
- "availability-zone": "us-east-2c"
- },
+ "placement": {"availability-zone": "us-east-2c"},
"profile": "default-hvm",
- "public-hostname": (
- "ec2-18-218-219-181.us-east-2.compute.amazonaws.com"),
+ "public-hostname": "ec2-18-218-219-181.us-east-2.compute.amazonaws.com",
"public-ipv4": "18.218.219.181",
- "public-keys": {
- "yourkeyname,e": [
- "ssh-rsa AAAAW...DZ yourkeyname"
- ]
- },
+ "public-keys": {"yourkeyname,e": ["ssh-rsa AAAAW...DZ yourkeyname"]},
"reservation-id": "r-09b4917135cdd33be",
"security-groups": "launch-wizard-4",
- "services": {
- "domain": "amazonaws.com",
- "partition": "aws"
- }
+ "services": {"domain": "amazonaws.com", "partition": "aws"},
}
-M_PATH_NET = 'cloudinit.sources.DataSourceEc2.net.'
+M_PATH_NET = "cloudinit.sources.DataSourceEc2.net."
def _register_ssh_keys(rfunc, base_url, keys_data):
@@ -250,9 +229,9 @@ def _register_ssh_keys(rfunc, base_url, keys_data):
"""
base_url = base_url.rstrip("/")
- odd_index = '\n'.join(
- ["{0}={1}".format(n, name)
- for n, name in enumerate(sorted(keys_data))])
+ odd_index = "\n".join(
+ ["{0}={1}".format(n, name) for n, name in enumerate(sorted(keys_data))]
+ )
rfunc(base_url, odd_index)
rfunc(base_url + "/", odd_index)
@@ -260,7 +239,7 @@ def _register_ssh_keys(rfunc, base_url, keys_data):
for n, name in enumerate(sorted(keys_data)):
val = keys_data[name]
if isinstance(val, list):
- val = '\n'.join(val)
+ val = "\n".join(val)
burl = base_url + "/%s" % n
rfunc(burl, "openssh-key")
rfunc(burl + "/", "openssh-key")
@@ -281,6 +260,7 @@ def register_mock_metaserver(base_url, data):
base_url/mac with 00:16:3e:00:00:00
In the index, references to lists or dictionaries have a trailing /.
"""
+
def register_helper(register, base_url, body):
if not isinstance(base_url, str):
register(base_url, body)
@@ -289,25 +269,24 @@ def register_mock_metaserver(base_url, data):
if isinstance(body, str):
register(base_url, body)
elif isinstance(body, list):
- register(base_url, '\n'.join(body) + '\n')
- register(base_url + '/', '\n'.join(body) + '\n')
+ register(base_url, "\n".join(body) + "\n")
+ register(base_url + "/", "\n".join(body) + "\n")
elif isinstance(body, dict):
vals = []
for k, v in body.items():
- if k == 'public-keys':
- _register_ssh_keys(
- register, base_url + '/public-keys/', v)
+ if k == "public-keys":
+ _register_ssh_keys(register, base_url + "/public-keys/", v)
continue
suffix = k.rstrip("/")
if not isinstance(v, (str, list)):
suffix += "/"
vals.append(suffix)
- url = base_url + '/' + suffix
+ url = base_url + "/" + suffix
register_helper(register, url, v)
- register(base_url, '\n'.join(vals) + '\n')
- register(base_url + '/', '\n'.join(vals) + '\n')
+ register(base_url, "\n".join(vals) + "\n")
+ register(base_url + "/", "\n".join(vals) + "\n")
elif body is None:
- register(base_url, 'not found', status=404)
+ register(base_url, "not found", status=404)
def myreg(*argc, **kwargs):
url = argc[0]
@@ -322,9 +301,9 @@ class TestEc2(test_helpers.HttprettyTestCase):
maxDiff = None
valid_platform_data = {
- 'uuid': 'ec212f79-87d1-2f1d-588f-d86dc0fd5412',
- 'uuid_source': 'dmi',
- 'serial': 'ec212f79-87d1-2f1d-588f-d86dc0fd5412',
+ "uuid": "ec212f79-87d1-2f1d-588f-d86dc0fd5412",
+ "uuid_source": "dmi",
+ "serial": "ec212f79-87d1-2f1d-588f-d86dc0fd5412",
}
def setUp(self):
@@ -333,9 +312,9 @@ class TestEc2(test_helpers.HttprettyTestCase):
self.metadata_addr = self.datasource.metadata_urls[0]
self.tmp = self.tmp_dir()
- def data_url(self, version, data_item='meta-data'):
+ def data_url(self, version, data_item="meta-data"):
"""Return a metadata url based on the version provided."""
- return '/'.join([self.metadata_addr, version, data_item])
+ return "/".join([self.metadata_addr, version, data_item])
def _patch_add_cleanup(self, mpath, *args, **kwargs):
p = mock.patch(mpath, *args, **kwargs)
@@ -345,7 +324,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
def _setup_ds(self, sys_cfg, platform_data, md, md_version=None):
self.uris = []
distro = {}
- paths = helpers.Paths({'run_dir': self.tmp})
+ paths = helpers.Paths({"run_dir": self.tmp})
if sys_cfg is None:
sys_cfg = {}
ds = self.datasource(sys_cfg=sys_cfg, distro=distro, paths=paths)
@@ -354,32 +333,39 @@ class TestEc2(test_helpers.HttprettyTestCase):
if platform_data is not None:
self._patch_add_cleanup(
"cloudinit.sources.DataSourceEc2._collect_platform_data",
- return_value=platform_data)
+ return_value=platform_data,
+ )
if md:
- all_versions = (
- [ds.min_metadata_version] + ds.extended_metadata_versions)
- token_url = self.data_url('latest', data_item='api/token')
- register_mock_metaserver(token_url, 'API-TOKEN')
+ all_versions = [
+ ds.min_metadata_version
+ ] + ds.extended_metadata_versions
+ token_url = self.data_url("latest", data_item="api/token")
+ register_mock_metaserver(token_url, "API-TOKEN")
for version in all_versions:
- metadata_url = self.data_url(version) + '/'
+ metadata_url = self.data_url(version) + "/"
if version == md_version:
# Register all metadata for desired version
register_mock_metaserver(
- metadata_url, md.get('md', DEFAULT_METADATA))
+ metadata_url, md.get("md", DEFAULT_METADATA)
+ )
userdata_url = self.data_url(
- version, data_item='user-data')
- register_mock_metaserver(userdata_url, md.get('ud', ''))
+ version, data_item="user-data"
+ )
+ register_mock_metaserver(userdata_url, md.get("ud", ""))
identity_url = self.data_url(
- version, data_item='dynamic/instance-identity')
+ version, data_item="dynamic/instance-identity"
+ )
register_mock_metaserver(
- identity_url, md.get('id', DYNAMIC_METADATA))
+ identity_url, md.get("id", DYNAMIC_METADATA)
+ )
else:
- instance_id_url = metadata_url + 'instance-id'
+ instance_id_url = metadata_url + "instance-id"
if version == ds.min_metadata_version:
# Add min_metadata_version service availability check
register_mock_metaserver(
- instance_id_url, DEFAULT_METADATA['instance-id'])
+ instance_id_url, DEFAULT_METADATA["instance-id"]
+ )
else:
# Register 404s for all unrequested extended versions
register_mock_metaserver(instance_id_url, None)
@@ -389,24 +375,33 @@ class TestEc2(test_helpers.HttprettyTestCase):
"""network_config property returns network version 2 for metadata"""
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
- md={'md': DEFAULT_METADATA})
- find_fallback_path = M_PATH_NET + 'find_fallback_nic'
+ sys_cfg={"datasource": {"Ec2": {"strict_id": True}}},
+ md={"md": DEFAULT_METADATA},
+ )
+ find_fallback_path = M_PATH_NET + "find_fallback_nic"
with mock.patch(find_fallback_path) as m_find_fallback:
- m_find_fallback.return_value = 'eth9'
+ m_find_fallback.return_value = "eth9"
ds.get_data()
- mac1 = '06:17:04:d7:26:09' # Defined in DEFAULT_METADATA
- expected = {'version': 2, 'ethernets': {'eth9': {
- 'match': {'macaddress': '06:17:04:d7:26:09'}, 'set-name': 'eth9',
- 'dhcp4': True, 'dhcp6': True}}}
- patch_path = M_PATH_NET + 'get_interfaces_by_mac'
- get_interface_mac_path = M_PATH_NET + 'get_interface_mac'
+ mac1 = "06:17:04:d7:26:09" # Defined in DEFAULT_METADATA
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": "06:17:04:d7:26:09"},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": True,
+ }
+ },
+ }
+ patch_path = M_PATH_NET + "get_interfaces_by_mac"
+ get_interface_mac_path = M_PATH_NET + "get_interface_mac"
with mock.patch(patch_path) as m_get_interfaces_by_mac:
with mock.patch(find_fallback_path) as m_find_fallback:
with mock.patch(get_interface_mac_path) as m_get_mac:
- m_get_interfaces_by_mac.return_value = {mac1: 'eth9'}
- m_find_fallback.return_value = 'eth9'
+ m_get_interfaces_by_mac.return_value = {mac1: "eth9"}
+ m_find_fallback.return_value = "eth9"
m_get_mac.return_value = mac1
self.assertEqual(expected, ds.network_config)
@@ -418,24 +413,33 @@ class TestEc2(test_helpers.HttprettyTestCase):
"""
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
- md={'md': DEFAULT_METADATA})
- find_fallback_path = M_PATH_NET + 'find_fallback_nic'
+ sys_cfg={"datasource": {"Ec2": {"strict_id": True}}},
+ md={"md": DEFAULT_METADATA},
+ )
+ find_fallback_path = M_PATH_NET + "find_fallback_nic"
with mock.patch(find_fallback_path) as m_find_fallback:
- m_find_fallback.return_value = 'eth9'
+ m_find_fallback.return_value = "eth9"
ds.get_data()
- mac1 = '06:17:04:d7:26:08' # IPv4 only in DEFAULT_METADATA
- expected = {'version': 2, 'ethernets': {'eth9': {
- 'match': {'macaddress': mac1.lower()}, 'set-name': 'eth9',
- 'dhcp4': True, 'dhcp6': False}}}
- patch_path = M_PATH_NET + 'get_interfaces_by_mac'
- get_interface_mac_path = M_PATH_NET + 'get_interface_mac'
+ mac1 = "06:17:04:d7:26:08" # IPv4 only in DEFAULT_METADATA
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": mac1.lower()},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": False,
+ }
+ },
+ }
+ patch_path = M_PATH_NET + "get_interfaces_by_mac"
+ get_interface_mac_path = M_PATH_NET + "get_interface_mac"
with mock.patch(patch_path) as m_get_interfaces_by_mac:
with mock.patch(find_fallback_path) as m_find_fallback:
with mock.patch(get_interface_mac_path) as m_get_mac:
- m_get_interfaces_by_mac.return_value = {mac1: 'eth9'}
- m_find_fallback.return_value = 'eth9'
+ m_get_interfaces_by_mac.return_value = {mac1: "eth9"}
+ m_find_fallback.return_value = "eth9"
m_get_mac.return_value = mac1
self.assertEqual(expected, ds.network_config)
@@ -447,27 +451,38 @@ class TestEc2(test_helpers.HttprettyTestCase):
"""
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
- md={'md': SECONDARY_IP_METADATA_2018_09_24})
- find_fallback_path = M_PATH_NET + 'find_fallback_nic'
+ sys_cfg={"datasource": {"Ec2": {"strict_id": True}}},
+ md={"md": SECONDARY_IP_METADATA_2018_09_24},
+ )
+ find_fallback_path = M_PATH_NET + "find_fallback_nic"
with mock.patch(find_fallback_path) as m_find_fallback:
- m_find_fallback.return_value = 'eth9'
+ m_find_fallback.return_value = "eth9"
ds.get_data()
- mac1 = '0a:07:84:3d:6e:38' # 1 secondary IPv4 and 2 secondary IPv6
- expected = {'version': 2, 'ethernets': {'eth9': {
- 'match': {'macaddress': mac1}, 'set-name': 'eth9',
- 'addresses': ['172.31.45.70/20',
- '2600:1f16:292:100:f152:2222:3333:4444/128',
- '2600:1f16:292:100:f153:12a3:c37c:11f9/128'],
- 'dhcp4': True, 'dhcp6': True}}}
- patch_path = M_PATH_NET + 'get_interfaces_by_mac'
- get_interface_mac_path = M_PATH_NET + 'get_interface_mac'
+ mac1 = "0a:07:84:3d:6e:38" # 1 secondary IPv4 and 2 secondary IPv6
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": mac1},
+ "set-name": "eth9",
+ "addresses": [
+ "172.31.45.70/20",
+ "2600:1f16:292:100:f152:2222:3333:4444/128",
+ "2600:1f16:292:100:f153:12a3:c37c:11f9/128",
+ ],
+ "dhcp4": True,
+ "dhcp6": True,
+ }
+ },
+ }
+ patch_path = M_PATH_NET + "get_interfaces_by_mac"
+ get_interface_mac_path = M_PATH_NET + "get_interface_mac"
with mock.patch(patch_path) as m_get_interfaces_by_mac:
with mock.patch(find_fallback_path) as m_find_fallback:
with mock.patch(get_interface_mac_path) as m_get_mac:
- m_get_interfaces_by_mac.return_value = {mac1: 'eth9'}
- m_find_fallback.return_value = 'eth9'
+ m_get_interfaces_by_mac.return_value = {mac1: "eth9"}
+ m_find_fallback.return_value = "eth9"
m_get_mac.return_value = mac1
self.assertEqual(expected, ds.network_config)
@@ -475,12 +490,13 @@ class TestEc2(test_helpers.HttprettyTestCase):
"""network_config property is cached in DataSourceEc2."""
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
- md={'md': DEFAULT_METADATA})
- ds._network_config = {'cached': 'data'}
- self.assertEqual({'cached': 'data'}, ds.network_config)
+ sys_cfg={"datasource": {"Ec2": {"strict_id": True}}},
+ md={"md": DEFAULT_METADATA},
+ )
+ ds._network_config = {"cached": "data"}
+ self.assertEqual({"cached": "data"}, ds.network_config)
- @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
+ @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
def test_network_config_cached_property_refreshed_on_upgrade(self, m_dhcp):
"""Refresh the network_config Ec2 cache if network key is absent.
@@ -488,28 +504,39 @@ class TestEc2(test_helpers.HttprettyTestCase):
which lacked newly required network key.
"""
old_metadata = copy.deepcopy(DEFAULT_METADATA)
- old_metadata.pop('network')
+ old_metadata.pop("network")
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
- md={'md': old_metadata})
+ sys_cfg={"datasource": {"Ec2": {"strict_id": True}}},
+ md={"md": old_metadata},
+ )
self.assertTrue(ds.get_data())
# Provide new revision of metadata that contains network data
register_mock_metaserver(
- 'http://169.254.169.254/2009-04-04/meta-data/', DEFAULT_METADATA)
- mac1 = '06:17:04:d7:26:09' # Defined in DEFAULT_METADATA
- get_interface_mac_path = M_PATH_NET + 'get_interfaces_by_mac'
- ds.fallback_nic = 'eth9'
+ "http://169.254.169.254/2009-04-04/meta-data/", DEFAULT_METADATA
+ )
+ mac1 = "06:17:04:d7:26:09" # Defined in DEFAULT_METADATA
+ get_interface_mac_path = M_PATH_NET + "get_interfaces_by_mac"
+ ds.fallback_nic = "eth9"
with mock.patch(get_interface_mac_path) as m_get_interfaces_by_mac:
- m_get_interfaces_by_mac.return_value = {mac1: 'eth9'}
+ m_get_interfaces_by_mac.return_value = {mac1: "eth9"}
nc = ds.network_config # Will re-crawl network metadata
self.assertIsNotNone(nc)
self.assertIn(
- 'Refreshing stale metadata from prior to upgrade',
- self.logs.getvalue())
- expected = {'version': 2, 'ethernets': {'eth9': {
- 'match': {'macaddress': mac1}, 'set-name': 'eth9',
- 'dhcp4': True, 'dhcp6': True}}}
+ "Refreshing stale metadata from prior to upgrade",
+ self.logs.getvalue(),
+ )
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": mac1},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": True,
+ }
+ },
+ }
self.assertEqual(expected, ds.network_config)
def test_ec2_get_instance_id_refreshes_identity_on_upgrade(self):
@@ -522,40 +549,46 @@ class TestEc2(test_helpers.HttprettyTestCase):
self.datasource = ec2.DataSourceEc2Local
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
- md={'md': DEFAULT_METADATA})
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": DEFAULT_METADATA},
+ )
# Mock 404s on all versions except latest
- all_versions = (
- [ds.min_metadata_version] + ds.extended_metadata_versions)
+ all_versions = [
+ ds.min_metadata_version
+ ] + ds.extended_metadata_versions
for ver in all_versions[:-1]:
register_mock_metaserver(
- 'http://169.254.169.254/{0}/meta-data/instance-id'.format(ver),
- None)
- ds.metadata_address = 'http://169.254.169.254'
+ "http://169.254.169.254/{0}/meta-data/instance-id".format(ver),
+ None,
+ )
+ ds.metadata_address = "http://169.254.169.254"
register_mock_metaserver(
- '{0}/{1}/meta-data/'.format(ds.metadata_address, all_versions[-1]),
- DEFAULT_METADATA)
+ "{0}/{1}/meta-data/".format(ds.metadata_address, all_versions[-1]),
+ DEFAULT_METADATA,
+ )
# Register dynamic/instance-identity document which we now read.
register_mock_metaserver(
- '{0}/{1}/dynamic/'.format(ds.metadata_address, all_versions[-1]),
- DYNAMIC_METADATA)
+ "{0}/{1}/dynamic/".format(ds.metadata_address, all_versions[-1]),
+ DYNAMIC_METADATA,
+ )
ds._cloud_name = ec2.CloudNames.AWS
# Setup cached metadata on the Datasource
ds.metadata = DEFAULT_METADATA
- self.assertEqual('my-identity-id', ds.get_instance_id())
+ self.assertEqual("my-identity-id", ds.get_instance_id())
def test_classic_instance_true(self):
"""If no vpc-id in metadata, is_classic_instance must return true."""
md_copy = copy.deepcopy(DEFAULT_METADATA)
- ifaces_md = md_copy.get('network', {}).get('interfaces', {})
- for _mac, mac_data in ifaces_md.get('macs', {}).items():
- if 'vpc-id' in mac_data:
- del mac_data['vpc-id']
+ ifaces_md = md_copy.get("network", {}).get("interfaces", {})
+ for _mac, mac_data in ifaces_md.get("macs", {}).items():
+ if "vpc-id" in mac_data:
+ del mac_data["vpc-id"]
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
- md={'md': md_copy})
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": md_copy},
+ )
self.assertTrue(ds.get_data())
self.assertTrue(ds.is_classic_instance())
@@ -563,8 +596,9 @@ class TestEc2(test_helpers.HttprettyTestCase):
"""If vpc-id in metadata, is_classic_instance must return false."""
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
- md={'md': DEFAULT_METADATA})
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": DEFAULT_METADATA},
+ )
self.assertTrue(ds.get_data())
self.assertFalse(ds.is_classic_instance())
@@ -572,108 +606,117 @@ class TestEc2(test_helpers.HttprettyTestCase):
"""Inaccessibility of http://169.254.169.254 are retried."""
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
- md=None)
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md=None,
+ )
conn_error = requests.exceptions.ConnectionError(
- '[Errno 113] no route to host'
+ "[Errno 113] no route to host"
)
- mock_success = mock.MagicMock(contents=b'fakesuccess')
+ mock_success = mock.MagicMock(contents=b"fakesuccess")
mock_success.ok.return_value = True
- with mock.patch('cloudinit.url_helper.readurl') as m_readurl:
+ with mock.patch("cloudinit.url_helper.readurl") as m_readurl:
m_readurl.side_effect = (conn_error, conn_error, mock_success)
- with mock.patch('cloudinit.url_helper.time.sleep'):
+ with mock.patch("cloudinit.url_helper.time.sleep"):
self.assertTrue(ds.wait_for_metadata_service())
         # All three readurl attempts target /latest/api/token
self.assertEqual(3, len(m_readurl.call_args_list))
for readurl_call in m_readurl.call_args_list:
- self.assertIn('latest/api/token', readurl_call[0][0])
+ self.assertIn("latest/api/token", readurl_call[0][0])
def test_aws_token_403_fails_without_retries(self):
"""Verify that 403s fetching AWS tokens are not retried."""
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
- md=None)
- token_url = self.data_url('latest', data_item='api/token')
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md=None,
+ )
+ token_url = self.data_url("latest", data_item="api/token")
httpretty.register_uri(httpretty.PUT, token_url, body={}, status=403)
self.assertFalse(ds.get_data())
# Just one /latest/api/token request
logs = self.logs.getvalue()
failed_put_log = '"PUT /latest/api/token HTTP/1.1" 403 0'
expected_logs = [
- 'WARNING: Ec2 IMDS endpoint returned a 403 error. HTTP endpoint is'
- ' disabled. Aborting.',
+ "WARNING: Ec2 IMDS endpoint returned a 403 error. HTTP endpoint is"
+ " disabled. Aborting.",
"WARNING: IMDS's HTTP endpoint is probably disabled",
- failed_put_log
+ failed_put_log,
]
for log in expected_logs:
self.assertIn(log, logs)
self.assertEqual(
1,
- len([line for line in logs.splitlines() if failed_put_log in line])
+ len(
+ [line for line in logs.splitlines() if failed_put_log in line]
+ ),
)
def test_aws_token_redacted(self):
"""Verify that aws tokens are redacted when logged."""
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
- md={'md': DEFAULT_METADATA})
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": DEFAULT_METADATA},
+ )
self.assertTrue(ds.get_data())
all_logs = self.logs.getvalue().splitlines()
REDACT_TTL = "'X-aws-ec2-metadata-token-ttl-seconds': 'REDACTED'"
REDACT_TOK = "'X-aws-ec2-metadata-token': 'REDACTED'"
logs_with_redacted_ttl = [log for log in all_logs if REDACT_TTL in log]
logs_with_redacted = [log for log in all_logs if REDACT_TOK in log]
- logs_with_token = [log for log in all_logs if 'API-TOKEN' in log]
+ logs_with_token = [log for log in all_logs if "API-TOKEN" in log]
self.assertEqual(1, len(logs_with_redacted_ttl))
self.assertEqual(81, len(logs_with_redacted))
self.assertEqual(0, len(logs_with_token))
- @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
+ @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
def test_valid_platform_with_strict_true(self, m_dhcp):
"""Valid platform data should return true with strict_id true."""
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
- md={'md': DEFAULT_METADATA})
+ sys_cfg={"datasource": {"Ec2": {"strict_id": True}}},
+ md={"md": DEFAULT_METADATA},
+ )
ret = ds.get_data()
self.assertTrue(ret)
self.assertEqual(0, m_dhcp.call_count)
- self.assertEqual('aws', ds.cloud_name)
- self.assertEqual('ec2', ds.platform_type)
- self.assertEqual('metadata (%s)' % ds.metadata_address, ds.subplatform)
+ self.assertEqual("aws", ds.cloud_name)
+ self.assertEqual("ec2", ds.platform_type)
+ self.assertEqual("metadata (%s)" % ds.metadata_address, ds.subplatform)
def test_valid_platform_with_strict_false(self):
"""Valid platform data should return true with strict_id false."""
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
- md={'md': DEFAULT_METADATA})
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": DEFAULT_METADATA},
+ )
ret = ds.get_data()
self.assertTrue(ret)
def test_unknown_platform_with_strict_true(self):
"""Unknown platform data with strict_id true should return False."""
- uuid = 'ab439480-72bf-11d3-91fc-b8aded755F9a'
+ uuid = "ab439480-72bf-11d3-91fc-b8aded755F9a"
ds = self._setup_ds(
- platform_data={'uuid': uuid, 'uuid_source': 'dmi', 'serial': ''},
- sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
- md={'md': DEFAULT_METADATA})
+ platform_data={"uuid": uuid, "uuid_source": "dmi", "serial": ""},
+ sys_cfg={"datasource": {"Ec2": {"strict_id": True}}},
+ md={"md": DEFAULT_METADATA},
+ )
ret = ds.get_data()
self.assertFalse(ret)
def test_unknown_platform_with_strict_false(self):
"""Unknown platform data with strict_id false should return True."""
- uuid = 'ab439480-72bf-11d3-91fc-b8aded755F9a'
+ uuid = "ab439480-72bf-11d3-91fc-b8aded755F9a"
ds = self._setup_ds(
- platform_data={'uuid': uuid, 'uuid_source': 'dmi', 'serial': ''},
- sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
- md={'md': DEFAULT_METADATA})
+ platform_data={"uuid": uuid, "uuid_source": "dmi", "serial": ""},
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": DEFAULT_METADATA},
+ )
ret = ds.get_data()
self.assertTrue(ret)
@@ -682,24 +725,28 @@ class TestEc2(test_helpers.HttprettyTestCase):
self.datasource = ec2.DataSourceEc2Local
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
- md={'md': DEFAULT_METADATA})
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": DEFAULT_METADATA},
+ )
platform_attrs = [
- attr for attr in ec2.CloudNames.__dict__.keys()
- if not attr.startswith('__')]
+ attr
+ for attr in ec2.CloudNames.__dict__.keys()
+ if not attr.startswith("__")
+ ]
for attr_name in platform_attrs:
platform_name = getattr(ec2.CloudNames, attr_name)
- if platform_name != 'aws':
+ if platform_name != "aws":
ds._cloud_name = platform_name
ret = ds.get_data()
- self.assertEqual('ec2', ds.platform_type)
+ self.assertEqual("ec2", ds.platform_type)
self.assertFalse(ret)
message = (
"Local Ec2 mode only supported on ('aws',),"
- ' not {0}'.format(platform_name))
+ " not {0}".format(platform_name)
+ )
self.assertIn(message, self.logs.getvalue())
- @mock.patch('cloudinit.sources.DataSourceEc2.util.is_FreeBSD')
+ @mock.patch("cloudinit.sources.DataSourceEc2.util.is_FreeBSD")
def test_ec2_local_returns_false_on_bsd(self, m_is_freebsd):
"""DataSourceEc2Local returns False on BSD.
@@ -709,20 +756,23 @@ class TestEc2(test_helpers.HttprettyTestCase):
self.datasource = ec2.DataSourceEc2Local
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
- md={'md': DEFAULT_METADATA})
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": DEFAULT_METADATA},
+ )
ret = ds.get_data()
self.assertFalse(ret)
self.assertIn(
"FreeBSD doesn't support running dhclient with -sf",
- self.logs.getvalue())
-
- @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
- @mock.patch('cloudinit.net.find_fallback_nic')
- @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
- @mock.patch('cloudinit.sources.DataSourceEc2.util.is_FreeBSD')
- def test_ec2_local_performs_dhcp_on_non_bsd(self, m_is_bsd, m_dhcp,
- m_fallback_nic, m_net):
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network")
+ @mock.patch("cloudinit.net.find_fallback_nic")
+ @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ @mock.patch("cloudinit.sources.DataSourceEc2.util.is_FreeBSD")
+ def test_ec2_local_performs_dhcp_on_non_bsd(
+ self, m_is_bsd, m_dhcp, m_fallback_nic, m_net
+ ):
"""Ec2Local returns True for valid platform data on non-BSD with dhcp.
DataSourceEc2Local will setup initial IPv4 network via dhcp discovery.
@@ -730,31 +780,41 @@ class TestEc2(test_helpers.HttprettyTestCase):
When the platform data is valid, return True.
"""
- m_fallback_nic.return_value = 'eth9'
+ m_fallback_nic.return_value = "eth9"
m_is_bsd.return_value = False
- m_dhcp.return_value = [{
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'broadcast-address': '192.168.2.255'}]
+ m_dhcp.return_value = [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "broadcast-address": "192.168.2.255",
+ }
+ ]
self.datasource = ec2.DataSourceEc2Local
ds = self._setup_ds(
platform_data=self.valid_platform_data,
- sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
- md={'md': DEFAULT_METADATA})
+ sys_cfg={"datasource": {"Ec2": {"strict_id": False}}},
+ md={"md": DEFAULT_METADATA},
+ )
ret = ds.get_data()
self.assertTrue(ret)
- m_dhcp.assert_called_once_with('eth9', None)
+ m_dhcp.assert_called_once_with("eth9", None)
m_net.assert_called_once_with(
- broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9',
- prefix_or_mask='255.255.255.0', router='192.168.2.1',
- static_routes=None)
- self.assertIn('Crawl of metadata service took', self.logs.getvalue())
+ broadcast="192.168.2.255",
+ interface="eth9",
+ ip="192.168.2.9",
+ prefix_or_mask="255.255.255.0",
+ router="192.168.2.1",
+ static_routes=None,
+ )
+ self.assertIn("Crawl of metadata service took", self.logs.getvalue())
class TestGetSecondaryAddresses(test_helpers.CiTestCase):
- mac = '06:17:04:d7:26:ff'
+ mac = "06:17:04:d7:26:ff"
with_logs = True
def test_md_with_no_secondary_addresses(self):
@@ -764,26 +824,34 @@ class TestGetSecondaryAddresses(test_helpers.CiTestCase):
def test_md_with_secondary_v4_and_v6_addresses(self):
"""All secondary addresses are returned from nic metadata"""
self.assertEqual(
- ['172.31.45.70/20', '2600:1f16:292:100:f152:2222:3333:4444/128',
- '2600:1f16:292:100:f153:12a3:c37c:11f9/128'],
- ec2.get_secondary_addresses(NIC1_MD_IPV4_IPV6_MULTI_IP, self.mac))
+ [
+ "172.31.45.70/20",
+ "2600:1f16:292:100:f152:2222:3333:4444/128",
+ "2600:1f16:292:100:f153:12a3:c37c:11f9/128",
+ ],
+ ec2.get_secondary_addresses(NIC1_MD_IPV4_IPV6_MULTI_IP, self.mac),
+ )
def test_invalid_ipv4_ipv6_cidr_metadata_logged_with_defaults(self):
"""Any invalid subnet-ipv(4|6)-cidr-block values use defaults"""
invalid_cidr_md = copy.deepcopy(NIC1_MD_IPV4_IPV6_MULTI_IP)
- invalid_cidr_md['subnet-ipv4-cidr-block'] = "something-unexpected"
- invalid_cidr_md['subnet-ipv6-cidr-block'] = "not/sure/what/this/is"
+ invalid_cidr_md["subnet-ipv4-cidr-block"] = "something-unexpected"
+ invalid_cidr_md["subnet-ipv6-cidr-block"] = "not/sure/what/this/is"
self.assertEqual(
- ['172.31.45.70/24', '2600:1f16:292:100:f152:2222:3333:4444/128',
- '2600:1f16:292:100:f153:12a3:c37c:11f9/128'],
- ec2.get_secondary_addresses(invalid_cidr_md, self.mac))
+ [
+ "172.31.45.70/24",
+ "2600:1f16:292:100:f152:2222:3333:4444/128",
+ "2600:1f16:292:100:f153:12a3:c37c:11f9/128",
+ ],
+ ec2.get_secondary_addresses(invalid_cidr_md, self.mac),
+ )
expected_logs = [
"WARNING: Could not parse subnet-ipv4-cidr-block"
" something-unexpected for mac 06:17:04:d7:26:ff."
" ipv4 network config prefix defaults to /24",
"WARNING: Could not parse subnet-ipv6-cidr-block"
" not/sure/what/this/is for mac 06:17:04:d7:26:ff."
- " ipv6 network config prefix defaults to /128"
+ " ipv6 network config prefix defaults to /128",
]
logs = self.logs.getvalue()
for log in expected_logs:
@@ -791,188 +859,267 @@ class TestGetSecondaryAddresses(test_helpers.CiTestCase):
class TestConvertEc2MetadataNetworkConfig(test_helpers.CiTestCase):
-
def setUp(self):
super(TestConvertEc2MetadataNetworkConfig, self).setUp()
- self.mac1 = '06:17:04:d7:26:09'
+ self.mac1 = "06:17:04:d7:26:09"
interface_dict = copy.deepcopy(
- DEFAULT_METADATA['network']['interfaces']['macs'][self.mac1])
+ DEFAULT_METADATA["network"]["interfaces"]["macs"][self.mac1]
+ )
# These tests are written assuming the base interface doesn't have IPv6
- interface_dict.pop('ipv6s')
+ interface_dict.pop("ipv6s")
self.network_metadata = {
- 'interfaces': {'macs': {self.mac1: interface_dict}}}
+ "interfaces": {"macs": {self.mac1: interface_dict}}
+ }
def test_convert_ec2_metadata_network_config_skips_absent_macs(self):
"""Any mac absent from metadata is skipped by network config."""
- macs_to_nics = {self.mac1: 'eth9', 'DE:AD:BE:EF:FF:FF': 'vitualnic2'}
+ macs_to_nics = {self.mac1: "eth9", "DE:AD:BE:EF:FF:FF": "vitualnic2"}
# DE:AD:BE:EF:FF:FF represented by OS but not in metadata
- expected = {'version': 2, 'ethernets': {'eth9': {
- 'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
- 'dhcp4': True, 'dhcp6': False}}}
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": self.mac1},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": False,
+ }
+ },
+ }
self.assertEqual(
expected,
ec2.convert_ec2_metadata_network_config(
- self.network_metadata, macs_to_nics))
+ self.network_metadata, macs_to_nics
+ ),
+ )
def test_convert_ec2_metadata_network_config_handles_only_dhcp6(self):
"""Config dhcp6 when ipv6s is in metadata for a mac."""
- macs_to_nics = {self.mac1: 'eth9'}
+ macs_to_nics = {self.mac1: "eth9"}
network_metadata_ipv6 = copy.deepcopy(self.network_metadata)
- nic1_metadata = (
- network_metadata_ipv6['interfaces']['macs'][self.mac1])
- nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64'
- nic1_metadata.pop('public-ipv4s')
- expected = {'version': 2, 'ethernets': {'eth9': {
- 'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
- 'dhcp4': True, 'dhcp6': True}}}
+ nic1_metadata = network_metadata_ipv6["interfaces"]["macs"][self.mac1]
+ nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64"
+ nic1_metadata.pop("public-ipv4s")
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": self.mac1},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": True,
+ }
+ },
+ }
self.assertEqual(
expected,
ec2.convert_ec2_metadata_network_config(
- network_metadata_ipv6, macs_to_nics))
+ network_metadata_ipv6, macs_to_nics
+ ),
+ )
def test_convert_ec2_metadata_network_config_local_only_dhcp4(self):
"""Config dhcp4 when there are no public addresses in public-ipv4s."""
- macs_to_nics = {self.mac1: 'eth9'}
+ macs_to_nics = {self.mac1: "eth9"}
network_metadata_ipv6 = copy.deepcopy(self.network_metadata)
- nic1_metadata = (
- network_metadata_ipv6['interfaces']['macs'][self.mac1])
- nic1_metadata['local-ipv4s'] = '172.3.3.15'
- nic1_metadata.pop('public-ipv4s')
- expected = {'version': 2, 'ethernets': {'eth9': {
- 'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
- 'dhcp4': True, 'dhcp6': False}}}
+ nic1_metadata = network_metadata_ipv6["interfaces"]["macs"][self.mac1]
+ nic1_metadata["local-ipv4s"] = "172.3.3.15"
+ nic1_metadata.pop("public-ipv4s")
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": self.mac1},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": False,
+ }
+ },
+ }
self.assertEqual(
expected,
ec2.convert_ec2_metadata_network_config(
- network_metadata_ipv6, macs_to_nics))
+ network_metadata_ipv6, macs_to_nics
+ ),
+ )
def test_convert_ec2_metadata_network_config_handles_absent_dhcp4(self):
"""Config dhcp4 on fallback_nic when there are no ipv4 addresses."""
- macs_to_nics = {self.mac1: 'eth9'}
+ macs_to_nics = {self.mac1: "eth9"}
network_metadata_ipv6 = copy.deepcopy(self.network_metadata)
- nic1_metadata = (
- network_metadata_ipv6['interfaces']['macs'][self.mac1])
- nic1_metadata['public-ipv4s'] = ''
+ nic1_metadata = network_metadata_ipv6["interfaces"]["macs"][self.mac1]
+ nic1_metadata["public-ipv4s"] = ""
# When no ipv4 or ipv6 content but fallback_nic set, set dhcp4 config.
- expected = {'version': 2, 'ethernets': {'eth9': {
- 'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
- 'dhcp4': True, 'dhcp6': False}}}
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": self.mac1},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": False,
+ }
+ },
+ }
self.assertEqual(
expected,
ec2.convert_ec2_metadata_network_config(
- network_metadata_ipv6, macs_to_nics, fallback_nic='eth9'))
+ network_metadata_ipv6, macs_to_nics, fallback_nic="eth9"
+ ),
+ )
def test_convert_ec2_metadata_network_config_handles_local_v4_and_v6(self):
"""When ipv6s and local-ipv4s are non-empty, enable dhcp6 and dhcp4."""
- macs_to_nics = {self.mac1: 'eth9'}
+ macs_to_nics = {self.mac1: "eth9"}
network_metadata_both = copy.deepcopy(self.network_metadata)
- nic1_metadata = (
- network_metadata_both['interfaces']['macs'][self.mac1])
- nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64'
- nic1_metadata.pop('public-ipv4s')
- nic1_metadata['local-ipv4s'] = '10.0.0.42' # Local ipv4 only on vpc
- expected = {'version': 2, 'ethernets': {'eth9': {
- 'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
- 'dhcp4': True, 'dhcp6': True}}}
+ nic1_metadata = network_metadata_both["interfaces"]["macs"][self.mac1]
+ nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64"
+ nic1_metadata.pop("public-ipv4s")
+ nic1_metadata["local-ipv4s"] = "10.0.0.42" # Local ipv4 only on vpc
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": self.mac1},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": True,
+ }
+ },
+ }
self.assertEqual(
expected,
ec2.convert_ec2_metadata_network_config(
- network_metadata_both, macs_to_nics))
+ network_metadata_both, macs_to_nics
+ ),
+ )
def test_convert_ec2_metadata_network_config_handles_multiple_nics(self):
"""DHCP route-metric increases on secondary NICs for IPv4 and IPv6."""
- mac2 = '06:17:04:d7:26:08'
- macs_to_nics = {self.mac1: 'eth9', mac2: 'eth10'}
+ mac2 = "06:17:04:d7:26:08"
+ macs_to_nics = {self.mac1: "eth9", mac2: "eth10"}
network_metadata_both = copy.deepcopy(self.network_metadata)
# Add 2nd nic info
- network_metadata_both['interfaces']['macs'][mac2] = NIC2_MD
- nic1_metadata = (
- network_metadata_both['interfaces']['macs'][self.mac1])
- nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64'
- nic1_metadata.pop('public-ipv4s') # No public-ipv4 IPs in cfg
- nic1_metadata['local-ipv4s'] = '10.0.0.42' # Local ipv4 only on vpc
- expected = {'version': 2, 'ethernets': {
- 'eth9': {
- 'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
- 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 100},
- 'dhcp6': True, 'dhcp6-overrides': {'route-metric': 100}},
- 'eth10': {
- 'match': {'macaddress': mac2}, 'set-name': 'eth10',
- 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 200},
- 'dhcp6': False}}}
+ network_metadata_both["interfaces"]["macs"][mac2] = NIC2_MD
+ nic1_metadata = network_metadata_both["interfaces"]["macs"][self.mac1]
+ nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64"
+ nic1_metadata.pop("public-ipv4s") # No public-ipv4 IPs in cfg
+ nic1_metadata["local-ipv4s"] = "10.0.0.42" # Local ipv4 only on vpc
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": self.mac1},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": True,
+ "dhcp6-overrides": {"route-metric": 100},
+ },
+ "eth10": {
+ "match": {"macaddress": mac2},
+ "set-name": "eth10",
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 200},
+ "dhcp6": False,
+ },
+ },
+ }
self.assertEqual(
expected,
ec2.convert_ec2_metadata_network_config(
- network_metadata_both, macs_to_nics))
+ network_metadata_both, macs_to_nics
+ ),
+ )
def test_convert_ec2_metadata_network_config_handles_dhcp4_and_dhcp6(self):
"""Config both dhcp4 and dhcp6 when both vpc-ipv6 and ipv4 exists."""
- macs_to_nics = {self.mac1: 'eth9'}
+ macs_to_nics = {self.mac1: "eth9"}
network_metadata_both = copy.deepcopy(self.network_metadata)
- nic1_metadata = (
- network_metadata_both['interfaces']['macs'][self.mac1])
- nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64'
- expected = {'version': 2, 'ethernets': {'eth9': {
- 'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
- 'dhcp4': True, 'dhcp6': True}}}
+ nic1_metadata = network_metadata_both["interfaces"]["macs"][self.mac1]
+ nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64"
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": self.mac1},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": True,
+ }
+ },
+ }
self.assertEqual(
expected,
ec2.convert_ec2_metadata_network_config(
- network_metadata_both, macs_to_nics))
+ network_metadata_both, macs_to_nics
+ ),
+ )
def test_convert_ec2_metadata_gets_macs_from_get_interfaces_by_mac(self):
"""Convert Ec2 Metadata calls get_interfaces_by_mac by default."""
- expected = {'version': 2, 'ethernets': {'eth9': {
- 'match': {'macaddress': self.mac1},
- 'set-name': 'eth9', 'dhcp4': True, 'dhcp6': False}}}
- patch_path = M_PATH_NET + 'get_interfaces_by_mac'
+ expected = {
+ "version": 2,
+ "ethernets": {
+ "eth9": {
+ "match": {"macaddress": self.mac1},
+ "set-name": "eth9",
+ "dhcp4": True,
+ "dhcp6": False,
+ }
+ },
+ }
+ patch_path = M_PATH_NET + "get_interfaces_by_mac"
with mock.patch(patch_path) as m_get_interfaces_by_mac:
- m_get_interfaces_by_mac.return_value = {self.mac1: 'eth9'}
+ m_get_interfaces_by_mac.return_value = {self.mac1: "eth9"}
self.assertEqual(
expected,
- ec2.convert_ec2_metadata_network_config(self.network_metadata))
+ ec2.convert_ec2_metadata_network_config(self.network_metadata),
+ )
 class TestIdentifyPlatform(test_helpers.CiTestCase):
-
def collmock(self, **kwargs):
"""return non-special _collect_platform_data updated with changes."""
unspecial = {
- 'asset_tag': '3857-0037-2746-7462-1818-3997-77',
- 'serial': 'H23-C4J3JV-R6',
- 'uuid': '81c7e555-6471-4833-9551-1ab366c4cfd2',
- 'uuid_source': 'dmi',
- 'vendor': 'tothecloud',
+ "asset_tag": "3857-0037-2746-7462-1818-3997-77",
+ "serial": "H23-C4J3JV-R6",
+ "uuid": "81c7e555-6471-4833-9551-1ab366c4cfd2",
+ "uuid_source": "dmi",
+ "vendor": "tothecloud",
}
unspecial.update(**kwargs)
return unspecial
- @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data')
+ @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data")
def test_identify_zstack(self, m_collect):
- """zstack should be identified if chassis-asset-tag ends in .zstack.io
+ """zstack should be identified if chassis-asset-tag
+ ends in .zstack.io
"""
- m_collect.return_value = self.collmock(asset_tag='123456.zstack.io')
+ m_collect.return_value = self.collmock(asset_tag="123456.zstack.io")
self.assertEqual(ec2.CloudNames.ZSTACK, ec2.identify_platform())
- @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data')
+ @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data")
def test_identify_zstack_full_domain_only(self, m_collect):
- """zstack asset-tag matching should match only on full domain boundary.
+ """zstack asset-tag matching should match only on
+ full domain boundary.
"""
- m_collect.return_value = self.collmock(asset_tag='123456.buzzstack.io')
+ m_collect.return_value = self.collmock(asset_tag="123456.buzzstack.io")
self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform())
- @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data')
+ @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data")
def test_identify_e24cloud(self, m_collect):
"""e24cloud identified if vendor is e24cloud"""
- m_collect.return_value = self.collmock(vendor='e24cloud')
+ m_collect.return_value = self.collmock(vendor="e24cloud")
self.assertEqual(ec2.CloudNames.E24CLOUD, ec2.identify_platform())
- @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data')
+ @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data")
def test_identify_e24cloud_negative(self, m_collect):
"""e24cloud identified if vendor is e24cloud"""
- m_collect.return_value = self.collmock(vendor='e24cloudyday')
+ m_collect.return_value = self.collmock(vendor="e24cloudyday")
self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform())
+
# vi: ts=4 expandtab
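(A minimal sketch, not part of the patch, of calling the EC2 converter exercised by
TestConvertEc2MetadataNetworkConfig above; it reuses the DEFAULT_METADATA fixture
and the same get_interfaces_by_mac patch target referenced via M_PATH_NET in this
test file.)

from unittest import mock

from cloudinit.sources import DataSourceEc2 as ec2

with mock.patch(
    "cloudinit.sources.DataSourceEc2.net.get_interfaces_by_mac"
) as m_by_mac:
    # Illustrative name mapping; the MAC comes from DEFAULT_METADATA above.
    m_by_mac.return_value = {"06:17:04:d7:26:09": "eth0"}
    netcfg = ec2.convert_ec2_metadata_network_config(
        DEFAULT_METADATA["network"]
    )
# netcfg is a netplan version-2 dict ({"version": 2, "ethernets": {...}}),
# shaped like the `expected` values asserted in the tests above.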
diff --git a/tests/unittests/sources/test_exoscale.py b/tests/unittests/sources/test_exoscale.py
new file mode 100644
index 00000000..591256d8
--- /dev/null
+++ b/tests/unittests/sources/test_exoscale.py
@@ -0,0 +1,241 @@
+# Author: Mathieu Corbin <mathieu.corbin@exoscale.com>
+# Author: Christopher Glass <christopher.glass@exoscale.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+import os
+
+import httpretty
+import requests
+
+from cloudinit import helpers, util
+from cloudinit.sources.DataSourceExoscale import (
+ API_VERSION,
+ METADATA_URL,
+ PASSWORD_SERVER_PORT,
+ DataSourceExoscale,
+ get_password,
+ read_metadata,
+)
+from tests.unittests.helpers import HttprettyTestCase, mock
+
+TEST_PASSWORD_URL = "{}:{}/{}/".format(
+ METADATA_URL, PASSWORD_SERVER_PORT, API_VERSION
+)
+
+TEST_METADATA_URL = "{}/{}/meta-data/".format(METADATA_URL, API_VERSION)
+
+TEST_USERDATA_URL = "{}/{}/user-data".format(METADATA_URL, API_VERSION)
+
+
+@httpretty.activate
+class TestDatasourceExoscale(HttprettyTestCase):
+ def setUp(self):
+ super(TestDatasourceExoscale, self).setUp()
+ self.tmp = self.tmp_dir()
+ self.password_url = TEST_PASSWORD_URL
+ self.metadata_url = TEST_METADATA_URL
+ self.userdata_url = TEST_USERDATA_URL
+
+ def test_password_saved(self):
+ """The password is not set when it is not found
+        """The password is not set when the metadata service returns the
+        saved_password marker."""
+ httpretty.GET, self.password_url, body="saved_password"
+ )
+ self.assertFalse(get_password())
+
+ def test_password_empty(self):
+ """No password is set if the metadata service returns
+ an empty string."""
+ httpretty.register_uri(httpretty.GET, self.password_url, body="")
+ self.assertFalse(get_password())
+
+ def test_password(self):
+ """The password is set to what is found in the metadata
+ service."""
+ expected_password = "p@ssw0rd"
+ httpretty.register_uri(
+ httpretty.GET, self.password_url, body=expected_password
+ )
+ password = get_password()
+ self.assertEqual(expected_password, password)
+
+ def test_activate_removes_set_passwords_semaphore(self):
+ """Allow set_passwords to run every boot by removing the semaphore."""
+ path = helpers.Paths({"cloud_dir": self.tmp})
+ sem_dir = self.tmp_path("instance/sem", dir=self.tmp)
+ util.ensure_dir(sem_dir)
+ sem_file = os.path.join(sem_dir, "config_set_passwords")
+ with open(sem_file, "w") as stream:
+ stream.write("")
+ ds = DataSourceExoscale({}, None, path)
+ ds.activate(None, None)
+ self.assertFalse(os.path.exists(sem_file))
+
+ def test_get_data(self):
+ """The datasource conforms to expected behavior when supplied
+ full test data."""
+ path = helpers.Paths({"run_dir": self.tmp})
+ ds = DataSourceExoscale({}, None, path)
+ ds._is_platform_viable = lambda: True
+ expected_password = "p@ssw0rd"
+ expected_id = "12345"
+ expected_hostname = "myname"
+ expected_userdata = "#cloud-config"
+ httpretty.register_uri(
+ httpretty.GET, self.userdata_url, body=expected_userdata
+ )
+ httpretty.register_uri(
+ httpretty.GET, self.password_url, body=expected_password
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ self.metadata_url,
+ body="instance-id\nlocal-hostname",
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "{}local-hostname".format(self.metadata_url),
+ body=expected_hostname,
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "{}instance-id".format(self.metadata_url),
+ body=expected_id,
+ )
+ self.assertTrue(ds._get_data())
+ self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config")
+ self.assertEqual(
+ ds.metadata,
+ {"instance-id": expected_id, "local-hostname": expected_hostname},
+ )
+ self.assertEqual(
+ ds.get_config_obj(),
+ {
+ "ssh_pwauth": True,
+ "password": expected_password,
+ "chpasswd": {
+ "expire": False,
+ },
+ },
+ )
+
+ def test_get_data_saved_password(self):
+ """The datasource conforms to expected behavior when saved_password is
+ returned by the password server."""
+ path = helpers.Paths({"run_dir": self.tmp})
+ ds = DataSourceExoscale({}, None, path)
+ ds._is_platform_viable = lambda: True
+ expected_answer = "saved_password"
+ expected_id = "12345"
+ expected_hostname = "myname"
+ expected_userdata = "#cloud-config"
+ httpretty.register_uri(
+ httpretty.GET, self.userdata_url, body=expected_userdata
+ )
+ httpretty.register_uri(
+ httpretty.GET, self.password_url, body=expected_answer
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ self.metadata_url,
+ body="instance-id\nlocal-hostname",
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "{}local-hostname".format(self.metadata_url),
+ body=expected_hostname,
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "{}instance-id".format(self.metadata_url),
+ body=expected_id,
+ )
+ self.assertTrue(ds._get_data())
+ self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config")
+ self.assertEqual(
+ ds.metadata,
+ {"instance-id": expected_id, "local-hostname": expected_hostname},
+ )
+ self.assertEqual(ds.get_config_obj(), {})
+
+ def test_get_data_no_password(self):
+ """The datasource conforms to expected behavior when no password is
+ returned by the password server."""
+ path = helpers.Paths({"run_dir": self.tmp})
+ ds = DataSourceExoscale({}, None, path)
+ ds._is_platform_viable = lambda: True
+ expected_answer = ""
+ expected_id = "12345"
+ expected_hostname = "myname"
+ expected_userdata = "#cloud-config"
+ httpretty.register_uri(
+ httpretty.GET, self.userdata_url, body=expected_userdata
+ )
+ httpretty.register_uri(
+ httpretty.GET, self.password_url, body=expected_answer
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ self.metadata_url,
+ body="instance-id\nlocal-hostname",
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "{}local-hostname".format(self.metadata_url),
+ body=expected_hostname,
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "{}instance-id".format(self.metadata_url),
+ body=expected_id,
+ )
+ self.assertTrue(ds._get_data())
+ self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config")
+ self.assertEqual(
+ ds.metadata,
+ {"instance-id": expected_id, "local-hostname": expected_hostname},
+ )
+ self.assertEqual(ds.get_config_obj(), {})
+
+ @mock.patch("cloudinit.sources.DataSourceExoscale.get_password")
+ def test_read_metadata_when_password_server_unreachable(self, m_password):
+ """The read_metadata function returns partial results in case the
+ password server (only) is unreachable."""
+ expected_id = "12345"
+ expected_hostname = "myname"
+ expected_userdata = "#cloud-config"
+
+ m_password.side_effect = requests.Timeout("Fake Connection Timeout")
+ httpretty.register_uri(
+ httpretty.GET, self.userdata_url, body=expected_userdata
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ self.metadata_url,
+ body="instance-id\nlocal-hostname",
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "{}local-hostname".format(self.metadata_url),
+ body=expected_hostname,
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "{}instance-id".format(self.metadata_url),
+ body=expected_id,
+ )
+
+ result = read_metadata()
+
+ self.assertIsNone(result.get("password"))
+ self.assertEqual(
+ result.get("user-data").decode("utf-8"), expected_userdata
+ )
+
+ def test_non_viable_platform(self):
+ """The datasource fails fast when the platform is not viable."""
+ path = helpers.Paths({"run_dir": self.tmp})
+ ds = DataSourceExoscale({}, None, path)
+ ds._is_platform_viable = lambda: False
+ self.assertFalse(ds._get_data())
diff --git a/tests/unittests/sources/test_gce.py b/tests/unittests/sources/test_gce.py
new file mode 100644
index 00000000..e030931b
--- /dev/null
+++ b/tests/unittests/sources/test_gce.py
@@ -0,0 +1,416 @@
+# Copyright (C) 2014 Vaidas Jablonskis
+#
+# Author: Vaidas Jablonskis <jablonskis@gmail.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import datetime
+import json
+import re
+from base64 import b64decode, b64encode
+from unittest import mock
+from urllib.parse import urlparse
+
+import httpretty
+
+from cloudinit import distros, helpers, settings
+from cloudinit.sources import DataSourceGCE
+from tests.unittests import helpers as test_helpers
+
+GCE_META = {
+ "instance/id": "123",
+ "instance/zone": "foo/bar",
+ "instance/hostname": "server.project-foo.local",
+}
+
+GCE_META_PARTIAL = {
+ "instance/id": "1234",
+ "instance/hostname": "server.project-bar.local",
+ "instance/zone": "bar/baz",
+}
+
+GCE_META_ENCODING = {
+ "instance/id": "12345",
+ "instance/hostname": "server.project-baz.local",
+ "instance/zone": "baz/bang",
+ "instance/attributes": {
+ "user-data": b64encode(b"#!/bin/echo baz\n").decode("utf-8"),
+ "user-data-encoding": "base64",
+ },
+}
+
+GCE_USER_DATA_TEXT = {
+ "instance/id": "12345",
+ "instance/hostname": "server.project-baz.local",
+ "instance/zone": "baz/bang",
+ "instance/attributes": {
+ "user-data": "#!/bin/sh\necho hi mom\ntouch /run/up-now\n",
+ },
+}
+
+HEADERS = {"Metadata-Flavor": "Google"}
+MD_URL_RE = re.compile(
+ r"http://metadata.google.internal/computeMetadata/v1/.*"
+)
+GUEST_ATTRIBUTES_URL = (
+ "http://metadata.google.internal/computeMetadata/"
+ "v1/instance/guest-attributes/hostkeys/"
+)
+
+
+def _set_mock_metadata(gce_meta=None):
+ if gce_meta is None:
+ gce_meta = GCE_META
+
+ def _request_callback(method, uri, headers):
+ url_path = urlparse(uri).path
+ if url_path.startswith("/computeMetadata/v1/"):
+ path = url_path.split("/computeMetadata/v1/")[1:][0]
+ recursive = path.endswith("/")
+ path = path.rstrip("/")
+ else:
+ path = None
+ if path in gce_meta:
+ response = gce_meta.get(path)
+ if recursive:
+ response = json.dumps(response)
+ return (200, headers, response)
+ else:
+ return (404, headers, "")
+
+ # reset is needed. https://github.com/gabrielfalcao/HTTPretty/issues/316
+ httpretty.register_uri(httpretty.GET, MD_URL_RE, body=_request_callback)
+
+
+@httpretty.activate
+class TestDataSourceGCE(test_helpers.HttprettyTestCase):
+ def _make_distro(self, dtype, def_user=None):
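+        # Build a Distro object of the requested type, optionally seeding
+        # cfg with a default user before instantiating it.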
+ cfg = dict(settings.CFG_BUILTIN)
+ cfg["system_info"]["distro"] = dtype
+ paths = helpers.Paths(cfg["system_info"]["paths"])
+ distro_cls = distros.fetch(dtype)
+ if def_user:
+ cfg["system_info"]["default_user"] = def_user.copy()
+ distro = distro_cls(dtype, cfg["system_info"], paths)
+ return distro
+
+ def setUp(self):
+ tmp = self.tmp_dir()
+ self.ds = DataSourceGCE.DataSourceGCE(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": tmp})
+ )
+ ppatch = self.m_platform_reports_gce = mock.patch(
+ "cloudinit.sources.DataSourceGCE.platform_reports_gce"
+ )
+ self.m_platform_reports_gce = ppatch.start()
+ self.m_platform_reports_gce.return_value = True
+ self.addCleanup(ppatch.stop)
+ self.add_patch("time.sleep", "m_sleep") # just to speed up tests
+ super(TestDataSourceGCE, self).setUp()
+
+ def test_connection(self):
+ _set_mock_metadata()
+ success = self.ds.get_data()
+ self.assertTrue(success)
+
+ req_header = httpretty.last_request().headers
+ for header_name, expected_value in HEADERS.items():
+ self.assertEqual(expected_value, req_header.get(header_name))
+
+ def test_metadata(self):
+ # UnicodeDecodeError if set to ds.userdata instead of userdata_raw
+ meta = GCE_META.copy()
+ meta["instance/attributes/user-data"] = b"/bin/echo \xff\n"
+
+ _set_mock_metadata()
+ self.ds.get_data()
+
+ shostname = GCE_META.get("instance/hostname").split(".")[0]
+ self.assertEqual(shostname, self.ds.get_hostname())
+
+ self.assertEqual(
+ GCE_META.get("instance/id"), self.ds.get_instance_id()
+ )
+
+ self.assertEqual(
+ GCE_META.get("instance/attributes/user-data"),
+ self.ds.get_userdata_raw(),
+ )
+
+ # test partial metadata (missing user-data in particular)
+ def test_metadata_partial(self):
+ _set_mock_metadata(GCE_META_PARTIAL)
+ self.ds.get_data()
+
+ self.assertEqual(
+ GCE_META_PARTIAL.get("instance/id"), self.ds.get_instance_id()
+ )
+
+ shostname = GCE_META_PARTIAL.get("instance/hostname").split(".")[0]
+ self.assertEqual(shostname, self.ds.get_hostname())
+
+ def test_userdata_no_encoding(self):
+ """check that user-data is read."""
+ _set_mock_metadata(GCE_USER_DATA_TEXT)
+ self.ds.get_data()
+ self.assertEqual(
+ GCE_USER_DATA_TEXT["instance/attributes"]["user-data"].encode(),
+ self.ds.get_userdata_raw(),
+ )
+
+ def test_metadata_encoding(self):
+ """user-data is base64 encoded if user-data-encoding is 'base64'."""
+ _set_mock_metadata(GCE_META_ENCODING)
+ self.ds.get_data()
+
+ instance_data = GCE_META_ENCODING.get("instance/attributes")
+ decoded = b64decode(instance_data.get("user-data"))
+ self.assertEqual(decoded, self.ds.get_userdata_raw())
+
+ def test_missing_required_keys_return_false(self):
+ for required_key in [
+ "instance/id",
+ "instance/zone",
+ "instance/hostname",
+ ]:
+ meta = GCE_META_PARTIAL.copy()
+ del meta[required_key]
+ _set_mock_metadata(meta)
+ self.assertEqual(False, self.ds.get_data())
+ httpretty.reset()
+
+ def test_no_ssh_keys_metadata(self):
+ _set_mock_metadata()
+ self.ds.get_data()
+ self.assertEqual([], self.ds.get_public_ssh_keys())
+
+ def test_cloudinit_ssh_keys(self):
+ valid_key = "ssh-rsa VALID {0}"
+ invalid_key = "ssh-rsa INVALID {0}"
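+        # Keys published for the "cloudinit" user should be collected from
+        # both project and instance attributes; keys for any other user
+        # ("user:") must be ignored.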
+ project_attributes = {
+ "sshKeys": "\n".join(
+ [
+ "cloudinit:{0}".format(valid_key.format(0)),
+ "user:{0}".format(invalid_key.format(0)),
+ ]
+ ),
+ "ssh-keys": "\n".join(
+ [
+ "cloudinit:{0}".format(valid_key.format(1)),
+ "user:{0}".format(invalid_key.format(1)),
+ ]
+ ),
+ }
+ instance_attributes = {
+ "ssh-keys": "\n".join(
+ [
+ "cloudinit:{0}".format(valid_key.format(2)),
+ "user:{0}".format(invalid_key.format(2)),
+ ]
+ ),
+ "block-project-ssh-keys": "False",
+ }
+
+ meta = GCE_META.copy()
+ meta["project/attributes"] = project_attributes
+ meta["instance/attributes"] = instance_attributes
+
+ _set_mock_metadata(meta)
+ self.ds.get_data()
+
+ expected = [valid_key.format(key) for key in range(3)]
+ self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys()))
+
+ @mock.patch("cloudinit.sources.DataSourceGCE.ug_util")
+ def test_default_user_ssh_keys(self, mock_ug_util):
+ mock_ug_util.normalize_users_groups.return_value = None, None
+ mock_ug_util.extract_default.return_value = "ubuntu", None
+ ubuntu_ds = DataSourceGCE.DataSourceGCE(
+ settings.CFG_BUILTIN,
+ self._make_distro("ubuntu"),
+ helpers.Paths({"run_dir": self.tmp_dir()}),
+ )
+
+ valid_key = "ssh-rsa VALID {0}"
+ invalid_key = "ssh-rsa INVALID {0}"
+ project_attributes = {
+ "sshKeys": "\n".join(
+ [
+ "ubuntu:{0}".format(valid_key.format(0)),
+ "user:{0}".format(invalid_key.format(0)),
+ ]
+ ),
+ "ssh-keys": "\n".join(
+ [
+ "ubuntu:{0}".format(valid_key.format(1)),
+ "user:{0}".format(invalid_key.format(1)),
+ ]
+ ),
+ }
+ instance_attributes = {
+ "ssh-keys": "\n".join(
+ [
+ "ubuntu:{0}".format(valid_key.format(2)),
+ "user:{0}".format(invalid_key.format(2)),
+ ]
+ ),
+ "block-project-ssh-keys": "False",
+ }
+
+ meta = GCE_META.copy()
+ meta["project/attributes"] = project_attributes
+ meta["instance/attributes"] = instance_attributes
+
+ _set_mock_metadata(meta)
+ ubuntu_ds.get_data()
+
+ expected = [valid_key.format(key) for key in range(3)]
+ self.assertEqual(set(expected), set(ubuntu_ds.get_public_ssh_keys()))
+
+ def test_instance_ssh_keys_override(self):
+ valid_key = "ssh-rsa VALID {0}"
+ invalid_key = "ssh-rsa INVALID {0}"
+ project_attributes = {
+ "sshKeys": "cloudinit:{0}".format(invalid_key.format(0)),
+ "ssh-keys": "cloudinit:{0}".format(invalid_key.format(1)),
+ }
+ instance_attributes = {
+ "sshKeys": "cloudinit:{0}".format(valid_key.format(0)),
+ "ssh-keys": "cloudinit:{0}".format(valid_key.format(1)),
+ "block-project-ssh-keys": "False",
+ }
+
+ meta = GCE_META.copy()
+ meta["project/attributes"] = project_attributes
+ meta["instance/attributes"] = instance_attributes
+
+ _set_mock_metadata(meta)
+ self.ds.get_data()
+
+ expected = [valid_key.format(key) for key in range(2)]
+ self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys()))
+
+ def test_block_project_ssh_keys_override(self):
+ valid_key = "ssh-rsa VALID {0}"
+ invalid_key = "ssh-rsa INVALID {0}"
+ project_attributes = {
+ "sshKeys": "cloudinit:{0}".format(invalid_key.format(0)),
+ "ssh-keys": "cloudinit:{0}".format(invalid_key.format(1)),
+ }
+ instance_attributes = {
+ "ssh-keys": "cloudinit:{0}".format(valid_key.format(0)),
+ "block-project-ssh-keys": "True",
+ }
+
+ meta = GCE_META.copy()
+ meta["project/attributes"] = project_attributes
+ meta["instance/attributes"] = instance_attributes
+
+ _set_mock_metadata(meta)
+ self.ds.get_data()
+
+ expected = [valid_key.format(0)]
+ self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys()))
+
+ def test_only_last_part_of_zone_used_for_availability_zone(self):
+ _set_mock_metadata()
+ r = self.ds.get_data()
+ self.assertEqual(True, r)
+ self.assertEqual("bar", self.ds.availability_zone)
+
+ @mock.patch("cloudinit.sources.DataSourceGCE.GoogleMetadataFetcher")
+ def test_get_data_returns_false_if_not_on_gce(self, m_fetcher):
+ self.m_platform_reports_gce.return_value = False
+ ret = self.ds.get_data()
+ self.assertEqual(False, ret)
+ m_fetcher.assert_not_called()
+
+ def test_has_expired(self):
+ def _get_timestamp(days):
+ format_str = "%Y-%m-%dT%H:%M:%S+0000"
+ today = datetime.datetime.now()
+ timestamp = today + datetime.timedelta(days=days)
+ return timestamp.strftime(format_str)
+
+ past = _get_timestamp(-1)
+ future = _get_timestamp(1)
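+        # Map raw public-key metadata lines to whether _has_expired should
+        # report them as expired; only google-ssh keys with a parseable
+        # expireOn in the past count as expired.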
+ ssh_keys = {
+ None: False,
+ "": False,
+ "Invalid": False,
+ "user:ssh-rsa key user@domain.com": False,
+ 'user:ssh-rsa key google {"expireOn":"%s"}' % past: False,
+ "user:ssh-rsa key google-ssh": False,
+ "user:ssh-rsa key google-ssh {invalid:json}": False,
+ 'user:ssh-rsa key google-ssh {"userName":"user"}': False,
+ 'user:ssh-rsa key google-ssh {"expireOn":"invalid"}': False,
+ 'user:xyz key google-ssh {"expireOn":"%s"}' % future: False,
+ 'user:xyz key google-ssh {"expireOn":"%s"}' % past: True,
+ }
+
+ for key, expired in ssh_keys.items():
+ self.assertEqual(DataSourceGCE._has_expired(key), expired)
+
+ def test_parse_public_keys_non_ascii(self):
+ public_key_data = [
+ "cloudinit:rsa ssh-ke%s invalid" % chr(165),
+ "use%sname:rsa ssh-key" % chr(174),
+ "cloudinit:test 1",
+ "default:test 2",
+ "user:test 3",
+ ]
+ expected = ["test 1", "test 2"]
+ found = DataSourceGCE._parse_public_keys(
+ public_key_data, default_user="default"
+ )
+ self.assertEqual(sorted(found), sorted(expected))
+
+ @mock.patch("cloudinit.url_helper.readurl")
+ def test_publish_host_keys(self, m_readurl):
+ hostkeys = [("ssh-rsa", "asdfasdf"), ("ssh-ed25519", "qwerqwer")]
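+        # publish_host_keys should issue one PUT per host key against the
+        # guest-attributes endpoint, using the key type as the URL suffix.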
+ readurl_expected_calls = [
+ mock.call(
+ check_status=False,
+ data=b"asdfasdf",
+ headers=HEADERS,
+ request_method="PUT",
+ url="%s%s" % (GUEST_ATTRIBUTES_URL, "ssh-rsa"),
+ ),
+ mock.call(
+ check_status=False,
+ data=b"qwerqwer",
+ headers=HEADERS,
+ request_method="PUT",
+ url="%s%s" % (GUEST_ATTRIBUTES_URL, "ssh-ed25519"),
+ ),
+ ]
+ self.ds.publish_host_keys(hostkeys)
+ m_readurl.assert_has_calls(readurl_expected_calls, any_order=True)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceGCE.EphemeralDHCPv4",
+ autospec=True,
+ )
+ @mock.patch(
+ "cloudinit.sources.DataSourceGCE.DataSourceGCELocal.fallback_interface"
+ )
+ def test_local_datasource_uses_ephemeral_dhcp(self, _m_fallback, m_dhcp):
+ _set_mock_metadata()
+ ds = DataSourceGCE.DataSourceGCELocal(
+ sys_cfg={}, distro=None, paths=None
+ )
+ ds._get_data()
+ assert m_dhcp.call_count == 1
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceGCE.EphemeralDHCPv4",
+ autospec=True,
+ )
+ def test_datasource_doesnt_use_ephemeral_dhcp(self, m_dhcp):
+ _set_mock_metadata()
+ ds = DataSourceGCE.DataSourceGCE(sys_cfg={}, distro=None, paths=None)
+ ds._get_data()
+ assert m_dhcp.call_count == 0
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_hetzner.py b/tests/unittests/sources/test_hetzner.py
index eadb92f1..f80ed45f 100644
--- a/tests/unittests/test_datasource/test_hetzner.py
+++ b/tests/unittests/sources/test_hetzner.py
@@ -4,16 +4,17 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.sources import DataSourceHetzner
-import cloudinit.sources.helpers.hetzner as hc_helper
-from cloudinit import util, settings, helpers
-
-from cloudinit.tests.helpers import mock, CiTestCase
-
import base64
+
import pytest
-METADATA = util.load_yaml("""
+import cloudinit.sources.helpers.hetzner as hc_helper
+from cloudinit import helpers, settings, util
+from cloudinit.sources import DataSourceHetzner
+from tests.unittests.helpers import CiTestCase, mock
+
+METADATA = util.load_yaml(
+ """
hostname: cloudinit-test
instance-id: 123456
local-ipv4: ''
@@ -23,36 +24,31 @@ network-config:
name: eth0
subnets:
- dns_nameservers:
- - 213.133.99.99
- - 213.133.100.100
- - 213.133.98.98
+ - 185.12.64.1
+ - 185.12.64.2
ipv4: true
type: dhcp
- type: physical
- - name: eth0:0
- subnets:
- address: 2a01:4f8:beef:beef::1/64
+ dns_nameservers:
+ - 2a01:4ff:ff00::add:2
+ - 2a01:4ff:ff00::add:1
gateway: fe80::1
ipv6: true
- routes:
- - gateway: fe80::1%eth0
- netmask: 0
- network: '::'
- type: static
type: physical
version: 1
network-sysconfig: "DEVICE='eth0'\nTYPE=Ethernet\nBOOTPROTO=dhcp\n\
ONBOOT='yes'\nHWADDR=96:00:00:08:19:da\n\
IPV6INIT=yes\nIPV6ADDR=2a01:4f8:beef:beef::1/64\n\
IPV6_DEFAULTGW=fe80::1%eth0\nIPV6_AUTOCONF=no\n\
- DNS1=213.133.99.99\nDNS2=213.133.100.100\n"
-public-ipv4: 192.168.0.1
+ DNS1=185.12.64.1\nDNS2=185.12.64.2\n"
+public-ipv4: 192.168.0.2
public-keys:
- ssh-ed25519 \
AAAAC3Nzac1lZdI1NTE5AaaAIaFrcac0yVITsmRrmueq6MD0qYNKlEvW8O1Ib4nkhmWh \
test-key@workstation
vendor_data: "test"
-""")
+"""
+)
USERDATA = b"""#cloud-config
runcmd:
@@ -64,55 +60,78 @@ class TestDataSourceHetzner(CiTestCase):
"""
Test reading the meta-data
"""
+
def setUp(self):
super(TestDataSourceHetzner, self).setUp()
self.tmp = self.tmp_dir()
def get_ds(self):
ds = DataSourceHetzner.DataSourceHetzner(
- settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
return ds
- @mock.patch('cloudinit.net.EphemeralIPv4Network')
- @mock.patch('cloudinit.net.find_fallback_nic')
- @mock.patch('cloudinit.sources.helpers.hetzner.read_metadata')
- @mock.patch('cloudinit.sources.helpers.hetzner.read_userdata')
- @mock.patch('cloudinit.sources.DataSourceHetzner.get_hcloud_data')
- def test_read_data(self, m_get_hcloud_data, m_usermd, m_readmd,
- m_fallback_nic, m_net):
- m_get_hcloud_data.return_value = (True,
- str(METADATA.get('instance-id')))
+ @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ @mock.patch("cloudinit.sources.DataSourceHetzner.EphemeralDHCPv4")
+ @mock.patch("cloudinit.net.find_fallback_nic")
+ @mock.patch("cloudinit.sources.helpers.hetzner.read_metadata")
+ @mock.patch("cloudinit.sources.helpers.hetzner.read_userdata")
+ @mock.patch("cloudinit.sources.DataSourceHetzner.get_hcloud_data")
+ def test_read_data(
+ self,
+ m_get_hcloud_data,
+ m_usermd,
+ m_readmd,
+ m_fallback_nic,
+ m_net,
+ m_dhcp,
+ ):
+ m_get_hcloud_data.return_value = (
+ True,
+ str(METADATA.get("instance-id")),
+ )
m_readmd.return_value = METADATA.copy()
m_usermd.return_value = USERDATA
- m_fallback_nic.return_value = 'eth0'
+ m_fallback_nic.return_value = "eth0"
+ m_dhcp.return_value = [
+ {
+ "interface": "eth0",
+ "fixed-address": "192.168.0.2",
+ "routers": "192.168.0.1",
+ "subnet-mask": "255.255.255.0",
+ "broadcast-address": "192.168.0.255",
+ }
+ ]
ds = self.get_ds()
ret = ds.get_data()
self.assertTrue(ret)
m_net.assert_called_once_with(
- 'eth0', '169.254.0.1',
- 16, '169.254.255.255'
+ iface="eth0",
+ connectivity_url_data={
+ "url": "http://169.254.169.254/hetzner/v1/metadata/instance-id"
+ },
)
self.assertTrue(m_readmd.called)
- self.assertEqual(METADATA.get('hostname'), ds.get_hostname())
+ self.assertEqual(METADATA.get("hostname"), ds.get_hostname())
- self.assertEqual(METADATA.get('public-keys'),
- ds.get_public_ssh_keys())
+ self.assertEqual(METADATA.get("public-keys"), ds.get_public_ssh_keys())
self.assertIsInstance(ds.get_public_ssh_keys(), list)
self.assertEqual(ds.get_userdata_raw(), USERDATA)
- self.assertEqual(ds.get_vendordata_raw(), METADATA.get('vendor_data'))
-
- @mock.patch('cloudinit.sources.helpers.hetzner.read_metadata')
- @mock.patch('cloudinit.net.find_fallback_nic')
- @mock.patch('cloudinit.sources.DataSourceHetzner.get_hcloud_data')
- def test_not_on_hetzner_returns_false(self, m_get_hcloud_data,
- m_find_fallback, m_read_md):
+ self.assertEqual(ds.get_vendordata_raw(), METADATA.get("vendor_data"))
+
+ @mock.patch("cloudinit.sources.helpers.hetzner.read_metadata")
+ @mock.patch("cloudinit.net.find_fallback_nic")
+ @mock.patch("cloudinit.sources.DataSourceHetzner.get_hcloud_data")
+ def test_not_on_hetzner_returns_false(
+ self, m_get_hcloud_data, m_find_fallback, m_read_md
+ ):
"""If helper 'get_hcloud_data' returns False,
- return False from get_data."""
+ return False from get_data."""
m_get_hcloud_data.return_value = (False, None)
ds = self.get_ds()
ret = ds.get_data()
@@ -132,11 +151,14 @@ class TestMaybeB64Decode:
with pytest.raises(TypeError):
hc_helper.maybe_b64decode(invalid_input)
- @pytest.mark.parametrize("in_data,expected", [
- # If data is not b64 encoded, then return value should be the same.
- (b"this is my data", b"this is my data"),
- # If data is b64 encoded, then return value should be decoded.
- (base64.b64encode(b"data"), b"data"),
- ])
+ @pytest.mark.parametrize(
+ "in_data,expected",
+ [
+ # If data is not b64 encoded, then return value should be the same.
+ (b"this is my data", b"this is my data"),
+ # If data is b64 encoded, then return value should be decoded.
+ (base64.b64encode(b"data"), b"data"),
+ ],
+ )
def test_happy_path(self, in_data, expected):
assert expected == hc_helper.maybe_b64decode(in_data)
diff --git a/tests/unittests/test_datasource/test_ibmcloud.py b/tests/unittests/sources/test_ibmcloud.py
index 9013ae9f..17a8be64 100644
--- a/tests/unittests/test_datasource/test_ibmcloud.py
+++ b/tests/unittests/sources/test_ibmcloud.py
@@ -1,15 +1,15 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.helpers import Paths
-from cloudinit.sources import DataSourceIBMCloud as ibm
-from cloudinit.tests import helpers as test_helpers
-from cloudinit import util
-
import base64
import copy
import json
from textwrap import dedent
+from cloudinit import util
+from cloudinit.helpers import Paths
+from cloudinit.sources import DataSourceIBMCloud as ibm
+from tests.unittests import helpers as test_helpers
+
mock = test_helpers.mock
D_PATH = "cloudinit.sources.DataSourceIBMCloud."
@@ -23,24 +23,36 @@ class TestGetIBMPlatform(test_helpers.CiTestCase):
blkid_base = {
"/dev/xvda1": {
- "DEVNAME": "/dev/xvda1", "LABEL": "cloudimg-bootfs",
- "TYPE": "ext3"},
+ "DEVNAME": "/dev/xvda1",
+ "LABEL": "cloudimg-bootfs",
+ "TYPE": "ext3",
+ },
"/dev/xvda2": {
- "DEVNAME": "/dev/xvda2", "LABEL": "cloudimg-rootfs",
- "TYPE": "ext4"},
+ "DEVNAME": "/dev/xvda2",
+ "LABEL": "cloudimg-rootfs",
+ "TYPE": "ext4",
+ },
}
blkid_metadata_disk = {
"/dev/xvdh1": {
- "DEVNAME": "/dev/xvdh1", "LABEL": "METADATA", "TYPE": "vfat",
- "SEC_TYPE": "msdos", "UUID": "681B-8C5D",
- "PARTUUID": "3d631e09-01"},
+ "DEVNAME": "/dev/xvdh1",
+ "LABEL": "METADATA",
+ "TYPE": "vfat",
+ "SEC_TYPE": "msdos",
+ "UUID": "681B-8C5D",
+ "PARTUUID": "3d631e09-01",
+ },
}
blkid_oscode_disk = {
"/dev/xvdh": {
- "DEVNAME": "/dev/xvdh", "LABEL": "config-2", "TYPE": "vfat",
- "SEC_TYPE": "msdos", "UUID": ibm.IBM_CONFIG_UUID}
+ "DEVNAME": "/dev/xvdh",
+ "LABEL": "config-2",
+ "TYPE": "vfat",
+ "SEC_TYPE": "msdos",
+ "UUID": ibm.IBM_CONFIG_UUID,
+ }
}
def setUp(self):
@@ -56,7 +68,8 @@ class TestGetIBMPlatform(test_helpers.CiTestCase):
m_is_prov.return_value = False
self.assertEqual(
(ibm.Platforms.TEMPLATE_LIVE_METADATA, "/dev/xvdh1"),
- ibm.get_ibm_platform())
+ ibm.get_ibm_platform(),
+ )
def test_id_template_prov_metadata(self, m_blkid, m_is_prov, _m_xen):
"""identify TEMPLATE_PROVISIONING_METADATA."""
@@ -64,7 +77,8 @@ class TestGetIBMPlatform(test_helpers.CiTestCase):
m_is_prov.return_value = True
self.assertEqual(
(ibm.Platforms.TEMPLATE_PROVISIONING_METADATA, "/dev/xvdh1"),
- ibm.get_ibm_platform())
+ ibm.get_ibm_platform(),
+ )
def test_id_template_prov_nodata(self, m_blkid, m_is_prov, _m_xen):
"""identify TEMPLATE_PROVISIONING_NODATA."""
@@ -72,14 +86,16 @@ class TestGetIBMPlatform(test_helpers.CiTestCase):
m_is_prov.return_value = True
self.assertEqual(
(ibm.Platforms.TEMPLATE_PROVISIONING_NODATA, None),
- ibm.get_ibm_platform())
+ ibm.get_ibm_platform(),
+ )
def test_id_os_code(self, m_blkid, m_is_prov, _m_xen):
"""Identify OS_CODE."""
m_blkid.return_value = self.blkid_oscode
m_is_prov.return_value = False
- self.assertEqual((ibm.Platforms.OS_CODE, "/dev/xvdh"),
- ibm.get_ibm_platform())
+ self.assertEqual(
+ (ibm.Platforms.OS_CODE, "/dev/xvdh"), ibm.get_ibm_platform()
+ )
def test_id_os_code_must_match_uuid(self, m_blkid, m_is_prov, _m_xen):
"""Test against false positive on openstack with non-ibm UUID."""
@@ -116,7 +132,8 @@ class TestReadMD(test_helpers.CiTestCase):
"public_keys": {"1091307": "ssh-rsa AAAAB3N..Hw== ci-pubkey"},
}
- content_interfaces = dedent("""\
+ content_interfaces = dedent(
+ """\
auto lo
iface lo inet loopback
@@ -125,71 +142,107 @@ class TestReadMD(test_helpers.CiTestCase):
iface eth0 inet static
address 10.82.43.5
netmask 255.255.255.192
- """)
+ """
+ )
userdata = b"#!/bin/sh\necho hi mom\n"
# meta.js file gets json encoded userdata as a list.
meta_js = '["#!/bin/sh\necho hi mom\n"]'
vendor_data = {
- "cloud-init": "#!/bin/bash\necho 'root:$6$5ab01p1m1' | chpasswd -e"}
+ "cloud-init": "#!/bin/bash\necho 'root:$6$5ab01p1m1' | chpasswd -e"
+ }
network_data = {
"links": [
- {"id": "interface_29402281", "name": "eth0", "mtu": None,
- "type": "phy", "ethernet_mac_address": "06:00:f1:bd:da:25"},
- {"id": "interface_29402279", "name": "eth1", "mtu": None,
- "type": "phy", "ethernet_mac_address": "06:98:5e:d0:7f:86"}
+ {
+ "id": "interface_29402281",
+ "name": "eth0",
+ "mtu": None,
+ "type": "phy",
+ "ethernet_mac_address": "06:00:f1:bd:da:25",
+ },
+ {
+ "id": "interface_29402279",
+ "name": "eth1",
+ "mtu": None,
+ "type": "phy",
+ "ethernet_mac_address": "06:98:5e:d0:7f:86",
+ },
],
"networks": [
- {"id": "network_109887563", "link": "interface_29402281",
- "type": "ipv4", "ip_address": "10.82.43.2",
- "netmask": "255.255.255.192",
- "routes": [
- {"network": "10.0.0.0", "netmask": "255.0.0.0",
- "gateway": "10.82.43.1"},
- {"network": "161.26.0.0", "netmask": "255.255.0.0",
- "gateway": "10.82.43.1"}]},
- {"id": "network_109887551", "link": "interface_29402279",
- "type": "ipv4", "ip_address": "108.168.194.252",
- "netmask": "255.255.255.248",
- "routes": [
- {"network": "0.0.0.0", "netmask": "0.0.0.0",
- "gateway": "108.168.194.249"}]}
+ {
+ "id": "network_109887563",
+ "link": "interface_29402281",
+ "type": "ipv4",
+ "ip_address": "10.82.43.2",
+ "netmask": "255.255.255.192",
+ "routes": [
+ {
+ "network": "10.0.0.0",
+ "netmask": "255.0.0.0",
+ "gateway": "10.82.43.1",
+ },
+ {
+ "network": "161.26.0.0",
+ "netmask": "255.255.0.0",
+ "gateway": "10.82.43.1",
+ },
+ ],
+ },
+ {
+ "id": "network_109887551",
+ "link": "interface_29402279",
+ "type": "ipv4",
+ "ip_address": "108.168.194.252",
+ "netmask": "255.255.255.248",
+ "routes": [
+ {
+ "network": "0.0.0.0",
+ "netmask": "0.0.0.0",
+ "gateway": "108.168.194.249",
+ }
+ ],
+ },
],
"services": [
{"type": "dns", "address": "10.0.80.11"},
- {"type": "dns", "address": "10.0.80.12"}
+ {"type": "dns", "address": "10.0.80.12"},
],
}
- sysuuid = '7f79ebf5-d791-43c3-a723-854e8389d59f'
+ sysuuid = "7f79ebf5-d791-43c3-a723-854e8389d59f"
def _get_expected_metadata(self, os_md):
"""return expected 'metadata' for data loaded from meta_data.json."""
os_md = copy.deepcopy(os_md)
renames = (
- ('hostname', 'local-hostname'),
- ('uuid', 'instance-id'),
- ('public_keys', 'public-keys'))
+ ("hostname", "local-hostname"),
+ ("uuid", "instance-id"),
+ ("public_keys", "public-keys"),
+ )
ret = {}
for osname, mdname in renames:
if osname in os_md:
ret[mdname] = os_md[osname]
- if 'random_seed' in os_md:
- ret['random_seed'] = base64.b64decode(os_md['random_seed'])
+ if "random_seed" in os_md:
+ ret["random_seed"] = base64.b64decode(os_md["random_seed"])
return ret
def test_provisioning_md(self, m_platform, m_sysuuid):
"""Provisioning env with a metadata disk should return None."""
m_platform.return_value = (
- ibm.Platforms.TEMPLATE_PROVISIONING_METADATA, "/dev/xvdh")
+ ibm.Platforms.TEMPLATE_PROVISIONING_METADATA,
+ "/dev/xvdh",
+ )
self.assertIsNone(ibm.read_md())
def test_provisioning_no_metadata(self, m_platform, m_sysuuid):
"""Provisioning env with no metadata disk should return None."""
m_platform.return_value = (
- ibm.Platforms.TEMPLATE_PROVISIONING_NODATA, None)
+ ibm.Platforms.TEMPLATE_PROVISIONING_NODATA,
+ None,
+ )
self.assertIsNone(ibm.read_md())
def test_provisioning_not_ibm(self, m_platform, m_sysuuid):
@@ -201,62 +254,83 @@ class TestReadMD(test_helpers.CiTestCase):
"""Template live environment should be identified."""
tmpdir = self.tmp_dir()
m_platform.return_value = (
- ibm.Platforms.TEMPLATE_LIVE_METADATA, tmpdir)
+ ibm.Platforms.TEMPLATE_LIVE_METADATA,
+ tmpdir,
+ )
m_sysuuid.return_value = self.sysuuid
- test_helpers.populate_dir(tmpdir, {
- 'openstack/latest/meta_data.json': json.dumps(self.template_md),
- 'openstack/latest/user_data': self.userdata,
- 'openstack/content/interfaces': self.content_interfaces,
- 'meta.js': self.meta_js})
+ test_helpers.populate_dir(
+ tmpdir,
+ {
+ "openstack/latest/meta_data.json": json.dumps(
+ self.template_md
+ ),
+ "openstack/latest/user_data": self.userdata,
+ "openstack/content/interfaces": self.content_interfaces,
+ "meta.js": self.meta_js,
+ },
+ )
ret = ibm.read_md()
- self.assertEqual(ibm.Platforms.TEMPLATE_LIVE_METADATA,
- ret['platform'])
- self.assertEqual(tmpdir, ret['source'])
- self.assertEqual(self.userdata, ret['userdata'])
- self.assertEqual(self._get_expected_metadata(self.template_md),
- ret['metadata'])
- self.assertEqual(self.sysuuid, ret['system-uuid'])
+ self.assertEqual(ibm.Platforms.TEMPLATE_LIVE_METADATA, ret["platform"])
+ self.assertEqual(tmpdir, ret["source"])
+ self.assertEqual(self.userdata, ret["userdata"])
+ self.assertEqual(
+ self._get_expected_metadata(self.template_md), ret["metadata"]
+ )
+ self.assertEqual(self.sysuuid, ret["system-uuid"])
def test_os_code_live(self, m_platform, m_sysuuid):
"""Verify an os_code metadata path."""
tmpdir = self.tmp_dir()
m_platform.return_value = (ibm.Platforms.OS_CODE, tmpdir)
netdata = json.dumps(self.network_data)
- test_helpers.populate_dir(tmpdir, {
- 'openstack/latest/meta_data.json': json.dumps(self.oscode_md),
- 'openstack/latest/user_data': self.userdata,
- 'openstack/latest/vendor_data.json': json.dumps(self.vendor_data),
- 'openstack/latest/network_data.json': netdata,
- })
+ test_helpers.populate_dir(
+ tmpdir,
+ {
+ "openstack/latest/meta_data.json": json.dumps(self.oscode_md),
+ "openstack/latest/user_data": self.userdata,
+ "openstack/latest/vendor_data.json": json.dumps(
+ self.vendor_data
+ ),
+ "openstack/latest/network_data.json": netdata,
+ },
+ )
ret = ibm.read_md()
- self.assertEqual(ibm.Platforms.OS_CODE, ret['platform'])
- self.assertEqual(tmpdir, ret['source'])
- self.assertEqual(self.userdata, ret['userdata'])
- self.assertEqual(self._get_expected_metadata(self.oscode_md),
- ret['metadata'])
+ self.assertEqual(ibm.Platforms.OS_CODE, ret["platform"])
+ self.assertEqual(tmpdir, ret["source"])
+ self.assertEqual(self.userdata, ret["userdata"])
+ self.assertEqual(
+ self._get_expected_metadata(self.oscode_md), ret["metadata"]
+ )
def test_os_code_live_no_userdata(self, m_platform, m_sysuuid):
"""Verify os_code without user-data."""
tmpdir = self.tmp_dir()
m_platform.return_value = (ibm.Platforms.OS_CODE, tmpdir)
- test_helpers.populate_dir(tmpdir, {
- 'openstack/latest/meta_data.json': json.dumps(self.oscode_md),
- 'openstack/latest/vendor_data.json': json.dumps(self.vendor_data),
- })
+ test_helpers.populate_dir(
+ tmpdir,
+ {
+ "openstack/latest/meta_data.json": json.dumps(self.oscode_md),
+ "openstack/latest/vendor_data.json": json.dumps(
+ self.vendor_data
+ ),
+ },
+ )
ret = ibm.read_md()
- self.assertEqual(ibm.Platforms.OS_CODE, ret['platform'])
- self.assertEqual(tmpdir, ret['source'])
- self.assertIsNone(ret['userdata'])
- self.assertEqual(self._get_expected_metadata(self.oscode_md),
- ret['metadata'])
+ self.assertEqual(ibm.Platforms.OS_CODE, ret["platform"])
+ self.assertEqual(tmpdir, ret["source"])
+ self.assertIsNone(ret["userdata"])
+ self.assertEqual(
+ self._get_expected_metadata(self.oscode_md), ret["metadata"]
+ )
class TestIsIBMProvisioning(test_helpers.FilesystemMockingTestCase):
"""Test the _is_ibm_provisioning method."""
+
inst_log = "/root/swinstall.log"
prov_cfg = "/root/provisioningConfiguration.cfg"
boot_ref = "/proc/1/environ"
@@ -279,9 +353,11 @@ class TestIsIBMProvisioning(test_helpers.FilesystemMockingTestCase):
def test_config_with_old_log(self):
"""A config with a log from previous boot is not provisioning."""
rootd = self.tmp_dir()
- data = {self.prov_cfg: ("key=value\nkey2=val2\n", -10),
- self.inst_log: ("log data\n", -30),
- self.boot_ref: ("PWD=/", 0)}
+ data = {
+ self.prov_cfg: ("key=value\nkey2=val2\n", -10),
+ self.inst_log: ("log data\n", -30),
+ self.boot_ref: ("PWD=/", 0),
+ }
test_helpers.populate_dir_with_ts(rootd, data)
self.assertFalse(self._call_with_root(rootd=rootd))
self.assertIn("from previous boot", self.logs.getvalue())
@@ -289,9 +365,11 @@ class TestIsIBMProvisioning(test_helpers.FilesystemMockingTestCase):
def test_config_with_new_log(self):
"""A config with a log from this boot is provisioning."""
rootd = self.tmp_dir()
- data = {self.prov_cfg: ("key=value\nkey2=val2\n", -10),
- self.inst_log: ("log data\n", 30),
- self.boot_ref: ("PWD=/", 0)}
+ data = {
+ self.prov_cfg: ("key=value\nkey2=val2\n", -10),
+ self.inst_log: ("log data\n", 30),
+ self.boot_ref: ("PWD=/", 0),
+ }
test_helpers.populate_dir_with_ts(rootd, data)
self.assertTrue(self._call_with_root(rootd=rootd))
self.assertIn("from current boot", self.logs.getvalue())
@@ -300,44 +378,49 @@ class TestIsIBMProvisioning(test_helpers.FilesystemMockingTestCase):
"""If the config and log existed, but no reference, assume not."""
rootd = self.tmp_dir()
test_helpers.populate_dir(
- rootd, {self.prov_cfg: "key=value", self.inst_log: "log data\n"})
+ rootd, {self.prov_cfg: "key=value", self.inst_log: "log data\n"}
+ )
self.assertFalse(self._call_with_root(rootd=rootd))
self.assertIn("no reference file", self.logs.getvalue())
class TestDataSourceIBMCloud(test_helpers.CiTestCase):
-
def setUp(self):
super(TestDataSourceIBMCloud, self).setUp()
self.tmp = self.tmp_dir()
- self.cloud_dir = self.tmp_path('cloud', dir=self.tmp)
+ self.cloud_dir = self.tmp_path("cloud", dir=self.tmp)
util.ensure_dir(self.cloud_dir)
- paths = Paths({'run_dir': self.tmp, 'cloud_dir': self.cloud_dir})
- self.ds = ibm.DataSourceIBMCloud(
- sys_cfg={}, distro=None, paths=paths)
+ paths = Paths({"run_dir": self.tmp, "cloud_dir": self.cloud_dir})
+ self.ds = ibm.DataSourceIBMCloud(sys_cfg={}, distro=None, paths=paths)
def test_get_data_false(self):
"""When read_md returns None, get_data returns False."""
- with mock.patch(D_PATH + 'read_md', return_value=None):
+ with mock.patch(D_PATH + "read_md", return_value=None):
self.assertFalse(self.ds.get_data())
def test_get_data_processes_read_md(self):
"""get_data processes and caches content returned by read_md."""
md = {
- 'metadata': {}, 'networkdata': 'net', 'platform': 'plat',
- 'source': 'src', 'system-uuid': 'uuid', 'userdata': 'ud',
- 'vendordata': 'vd'}
- with mock.patch(D_PATH + 'read_md', return_value=md):
+ "metadata": {},
+ "networkdata": "net",
+ "platform": "plat",
+ "source": "src",
+ "system-uuid": "uuid",
+ "userdata": "ud",
+ "vendordata": "vd",
+ }
+ with mock.patch(D_PATH + "read_md", return_value=md):
self.assertTrue(self.ds.get_data())
- self.assertEqual('src', self.ds.source)
- self.assertEqual('plat', self.ds.platform)
+ self.assertEqual("src", self.ds.source)
+ self.assertEqual("plat", self.ds.platform)
self.assertEqual({}, self.ds.metadata)
- self.assertEqual('ud', self.ds.userdata_raw)
- self.assertEqual('net', self.ds.network_json)
- self.assertEqual('vd', self.ds.vendordata_pure)
- self.assertEqual('uuid', self.ds.system_uuid)
- self.assertEqual('ibmcloud', self.ds.cloud_name)
- self.assertEqual('ibmcloud', self.ds.platform_type)
- self.assertEqual('plat (src)', self.ds.subplatform)
+ self.assertEqual("ud", self.ds.userdata_raw)
+ self.assertEqual("net", self.ds.network_json)
+ self.assertEqual("vd", self.ds.vendordata_pure)
+ self.assertEqual("uuid", self.ds.system_uuid)
+ self.assertEqual("ibmcloud", self.ds.cloud_name)
+ self.assertEqual("ibmcloud", self.ds.platform_type)
+ self.assertEqual("plat (src)", self.ds.subplatform)
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_init.py b/tests/unittests/sources/test_init.py
new file mode 100644
index 00000000..ce8fc970
--- /dev/null
+++ b/tests/unittests/sources/test_init.py
@@ -0,0 +1,994 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import copy
+import inspect
+import os
+import stat
+
+from cloudinit import importer, util
+from cloudinit.event import EventScope, EventType
+from cloudinit.helpers import Paths
+from cloudinit.sources import (
+ EXPERIMENTAL_TEXT,
+ INSTANCE_JSON_FILE,
+ INSTANCE_JSON_SENSITIVE_FILE,
+ METADATA_UNKNOWN,
+ REDACT_SENSITIVE_VALUE,
+ UNSET,
+ DataSource,
+ canonical_cloud_id,
+ redact_sensitive_keys,
+)
+from cloudinit.user_data import UserDataProcessor
+from tests.unittests.helpers import CiTestCase, mock
+
+
+class DataSourceTestSubclassNet(DataSource):
+
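+    # Minimal concrete DataSource used throughout these tests: _get_data
+    # populates metadata and userdata from the constructor overrides (or
+    # fixed defaults) and returns get_data_retval.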
+ dsname = "MyTestSubclass"
+ url_max_wait = 55
+
+ def __init__(
+ self,
+ sys_cfg,
+ distro,
+ paths,
+ custom_metadata=None,
+ custom_userdata=None,
+ get_data_retval=True,
+ ):
+ super(DataSourceTestSubclassNet, self).__init__(sys_cfg, distro, paths)
+ self._custom_userdata = custom_userdata
+ self._custom_metadata = custom_metadata
+ self._get_data_retval = get_data_retval
+
+ def _get_cloud_name(self):
+ return "SubclassCloudName"
+
+ def _get_data(self):
+ if self._custom_metadata:
+ self.metadata = self._custom_metadata
+ else:
+ self.metadata = {
+ "availability_zone": "myaz",
+ "local-hostname": "test-subclass-hostname",
+ "region": "myregion",
+ }
+ if self._custom_userdata:
+ self.userdata_raw = self._custom_userdata
+ else:
+ self.userdata_raw = "userdata_raw"
+ self.vendordata_raw = "vendordata_raw"
+ return self._get_data_retval
+
+
+class InvalidDataSourceTestSubclassNet(DataSource):
+ pass
+
+
+class TestDataSource(CiTestCase):
+
+ with_logs = True
+ maxDiff = None
+
+ def setUp(self):
+ super(TestDataSource, self).setUp()
+ self.sys_cfg = {"datasource": {"_undef": {"key1": False}}}
+ self.distro = "distrotest" # generally should be a Distro object
+ self.paths = Paths({})
+ self.datasource = DataSource(self.sys_cfg, self.distro, self.paths)
+
+ def test_datasource_init(self):
+ """DataSource initializes metadata attributes, ds_cfg and ud_proc."""
+ self.assertEqual(self.paths, self.datasource.paths)
+ self.assertEqual(self.sys_cfg, self.datasource.sys_cfg)
+ self.assertEqual(self.distro, self.datasource.distro)
+ self.assertIsNone(self.datasource.userdata)
+ self.assertEqual({}, self.datasource.metadata)
+ self.assertIsNone(self.datasource.userdata_raw)
+ self.assertIsNone(self.datasource.vendordata)
+ self.assertIsNone(self.datasource.vendordata_raw)
+ self.assertEqual({"key1": False}, self.datasource.ds_cfg)
+ self.assertIsInstance(self.datasource.ud_proc, UserDataProcessor)
+
+ def test_datasource_init_gets_ds_cfg_using_dsname(self):
+ """Init uses DataSource.dsname for sourcing ds_cfg."""
+ sys_cfg = {"datasource": {"MyTestSubclass": {"key2": False}}}
+ distro = "distrotest" # generally should be a Distro object
+ datasource = DataSourceTestSubclassNet(sys_cfg, distro, self.paths)
+ self.assertEqual({"key2": False}, datasource.ds_cfg)
+
+ def test_str_is_classname(self):
+ """The string representation of the datasource is the classname."""
+ self.assertEqual("DataSource", str(self.datasource))
+ self.assertEqual(
+ "DataSourceTestSubclassNet",
+ str(DataSourceTestSubclassNet("", "", self.paths)),
+ )
+
+ def test_datasource_get_url_params_defaults(self):
+ """get_url_params default url config settings for the datasource."""
+ params = self.datasource.get_url_params()
+ self.assertEqual(params.max_wait_seconds, self.datasource.url_max_wait)
+ self.assertEqual(params.timeout_seconds, self.datasource.url_timeout)
+ self.assertEqual(params.num_retries, self.datasource.url_retries)
+ self.assertEqual(
+ params.sec_between_retries, self.datasource.url_sec_between_retries
+ )
+
+ def test_datasource_get_url_params_subclassed(self):
+ """Subclasses can override get_url_params defaults."""
+ sys_cfg = {"datasource": {"MyTestSubclass": {"key2": False}}}
+ distro = "distrotest" # generally should be a Distro object
+ datasource = DataSourceTestSubclassNet(sys_cfg, distro, self.paths)
+ expected = (
+ datasource.url_max_wait,
+ datasource.url_timeout,
+ datasource.url_retries,
+ datasource.url_sec_between_retries,
+ )
+ url_params = datasource.get_url_params()
+ self.assertNotEqual(self.datasource.get_url_params(), url_params)
+ self.assertEqual(expected, url_params)
+
+ def test_datasource_get_url_params_ds_config_override(self):
+ """Datasource configuration options can override url param defaults."""
+ sys_cfg = {
+ "datasource": {
+ "MyTestSubclass": {
+ "max_wait": "1",
+ "timeout": "2",
+ "retries": "3",
+ "sec_between_retries": 4,
+ }
+ }
+ }
+ datasource = DataSourceTestSubclassNet(
+ sys_cfg, self.distro, self.paths
+ )
+ expected = (1, 2, 3, 4)
+ url_params = datasource.get_url_params()
+ self.assertNotEqual(
+ (
+ datasource.url_max_wait,
+ datasource.url_timeout,
+ datasource.url_retries,
+ datasource.url_sec_between_retries,
+ ),
+ url_params,
+ )
+ self.assertEqual(expected, url_params)
+
+ def test_datasource_get_url_params_is_zero_or_greater(self):
+ """get_url_params ignores timeouts with a value below 0."""
+ # Set an override that is below 0 which gets ignored.
+ sys_cfg = {"datasource": {"_undef": {"timeout": "-1"}}}
+ datasource = DataSource(sys_cfg, self.distro, self.paths)
+ (
+ _max_wait,
+ timeout,
+ _retries,
+ _sec_between_retries,
+ ) = datasource.get_url_params()
+ self.assertEqual(0, timeout)
+
+ def test_datasource_get_url_uses_defaults_on_errors(self):
+ """On invalid system config values for url_params defaults are used."""
+ # All invalid values should be logged
+ sys_cfg = {
+ "datasource": {
+ "_undef": {
+ "max_wait": "nope",
+ "timeout": "bug",
+ "retries": "nonint",
+ }
+ }
+ }
+ datasource = DataSource(sys_cfg, self.distro, self.paths)
+ url_params = datasource.get_url_params()
+ expected = (
+ datasource.url_max_wait,
+ datasource.url_timeout,
+ datasource.url_retries,
+ datasource.url_sec_between_retries,
+ )
+ self.assertEqual(expected, url_params)
+ logs = self.logs.getvalue()
+ expected_logs = [
+ "Config max_wait 'nope' is not an int, using default '-1'",
+ "Config timeout 'bug' is not an int, using default '10'",
+ "Config retries 'nonint' is not an int, using default '5'",
+ ]
+ for log in expected_logs:
+ self.assertIn(log, logs)
+
+ @mock.patch("cloudinit.sources.net.find_fallback_nic")
+ def test_fallback_interface_is_discovered(self, m_get_fallback_nic):
+ """The fallback_interface is discovered via find_fallback_nic."""
+ m_get_fallback_nic.return_value = "nic9"
+ self.assertEqual("nic9", self.datasource.fallback_interface)
+
+ @mock.patch("cloudinit.sources.net.find_fallback_nic")
+ def test_fallback_interface_logs_undiscovered(self, m_get_fallback_nic):
+ """Log a warning when fallback_interface can not discover the nic."""
+ self.datasource._cloud_name = "MySupahCloud"
+ m_get_fallback_nic.return_value = None # Couldn't discover nic
+ self.assertIsNone(self.datasource.fallback_interface)
+ self.assertEqual(
+ "WARNING: Did not find a fallback interface on MySupahCloud.\n",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.sources.net.find_fallback_nic")
+ def test_wb_fallback_interface_is_cached(self, m_get_fallback_nic):
+ """The fallback_interface is cached and won't be rediscovered."""
+ self.datasource._fallback_interface = "nic10"
+ self.assertEqual("nic10", self.datasource.fallback_interface)
+ m_get_fallback_nic.assert_not_called()
+
+ def test__get_data_unimplemented(self):
+ """Raise an error when _get_data is not implemented."""
+ with self.assertRaises(NotImplementedError) as context_manager:
+ self.datasource.get_data()
+ self.assertIn(
+ "Subclasses of DataSource must implement _get_data",
+ str(context_manager.exception),
+ )
+ datasource2 = InvalidDataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, self.paths
+ )
+ with self.assertRaises(NotImplementedError) as context_manager:
+ datasource2.get_data()
+ self.assertIn(
+ "Subclasses of DataSource must implement _get_data",
+ str(context_manager.exception),
+ )
+
+ def test_get_data_calls_subclass__get_data(self):
+ """Datasource.get_data uses the subclass' version of _get_data."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
+ self.assertTrue(datasource.get_data())
+ self.assertEqual(
+ {
+ "availability_zone": "myaz",
+ "local-hostname": "test-subclass-hostname",
+ "region": "myregion",
+ },
+ datasource.metadata,
+ )
+ self.assertEqual("userdata_raw", datasource.userdata_raw)
+ self.assertEqual("vendordata_raw", datasource.vendordata_raw)
+
+ def test_get_hostname_strips_local_hostname_without_domain(self):
+ """Datasource.get_hostname strips metadata local-hostname of domain."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
+ self.assertTrue(datasource.get_data())
+ self.assertEqual(
+ "test-subclass-hostname", datasource.metadata["local-hostname"]
+ )
+ self.assertEqual("test-subclass-hostname", datasource.get_hostname())
+ datasource.metadata["local-hostname"] = "hostname.my.domain.com"
+ self.assertEqual("hostname", datasource.get_hostname())
+
+ def test_get_hostname_with_fqdn_returns_local_hostname_with_domain(self):
+ """Datasource.get_hostname with fqdn set gets qualified hostname."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
+ self.assertTrue(datasource.get_data())
+ datasource.metadata["local-hostname"] = "hostname.my.domain.com"
+ self.assertEqual(
+ "hostname.my.domain.com", datasource.get_hostname(fqdn=True)
+ )
+
+ def test_get_hostname_without_metadata_uses_system_hostname(self):
+ """Datasource.gethostname runs util.get_hostname when no metadata."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
+ self.assertEqual({}, datasource.metadata)
+ mock_fqdn = "cloudinit.sources.util.get_fqdn_from_hosts"
+ with mock.patch("cloudinit.sources.util.get_hostname") as m_gethost:
+ with mock.patch(mock_fqdn) as m_fqdn:
+ m_gethost.return_value = "systemhostname.domain.com"
+                m_fqdn.return_value = None  # No matching fqdn in /etc/hosts
+ self.assertEqual("systemhostname", datasource.get_hostname())
+ self.assertEqual(
+ "systemhostname.domain.com",
+ datasource.get_hostname(fqdn=True),
+ )
+
+ def test_get_hostname_without_metadata_returns_none(self):
+ """Datasource.gethostname returns None when metadata_only and no MD."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
+ self.assertEqual({}, datasource.metadata)
+ mock_fqdn = "cloudinit.sources.util.get_fqdn_from_hosts"
+ with mock.patch("cloudinit.sources.util.get_hostname") as m_gethost:
+ with mock.patch(mock_fqdn) as m_fqdn:
+ self.assertIsNone(datasource.get_hostname(metadata_only=True))
+ self.assertIsNone(
+ datasource.get_hostname(fqdn=True, metadata_only=True)
+ )
+ self.assertEqual([], m_gethost.call_args_list)
+ self.assertEqual([], m_fqdn.call_args_list)
+
+ def test_get_hostname_without_metadata_prefers_etc_hosts(self):
+ """Datasource.gethostname prefers /etc/hosts to util.get_hostname."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
+ self.assertEqual({}, datasource.metadata)
+ mock_fqdn = "cloudinit.sources.util.get_fqdn_from_hosts"
+ with mock.patch("cloudinit.sources.util.get_hostname") as m_gethost:
+ with mock.patch(mock_fqdn) as m_fqdn:
+ m_gethost.return_value = "systemhostname.domain.com"
+ m_fqdn.return_value = "fqdnhostname.domain.com"
+ self.assertEqual("fqdnhostname", datasource.get_hostname())
+ self.assertEqual(
+ "fqdnhostname.domain.com",
+ datasource.get_hostname(fqdn=True),
+ )
+
+ def test_get_data_does_not_write_instance_data_on_failure(self):
+ """get_data does not write INSTANCE_JSON_FILE on get_data False."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg,
+ self.distro,
+ Paths({"run_dir": tmp}),
+ get_data_retval=False,
+ )
+ self.assertFalse(datasource.get_data())
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ self.assertFalse(
+ os.path.exists(json_file), "Found unexpected file %s" % json_file
+ )
+
+ def test_get_data_writes_json_instance_data_on_success(self):
+ """get_data writes INSTANCE_JSON_FILE to run_dir as world readable."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
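+        # Pin util.system_info output so the distro/platform fields in the
+        # expected instance-data are deterministic.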
+ sys_info = {
+ "python": "3.7",
+ "platform": (
+ "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal"
+ ),
+ "uname": [
+ "Linux",
+ "myhost",
+ "5.4.0-24-generic",
+ "SMP blah",
+ "x86_64",
+ ],
+ "variant": "ubuntu",
+ "dist": ["ubuntu", "20.04", "focal"],
+ }
+ with mock.patch("cloudinit.util.system_info", return_value=sys_info):
+ with mock.patch(
+ "cloudinit.sources.canonical_cloud_id",
+ return_value="canonical_cloud_id",
+ ):
+ datasource.get_data()
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ content = util.load_file(json_file)
+ expected = {
+ "base64_encoded_keys": [],
+ "merged_cfg": REDACT_SENSITIVE_VALUE,
+ "sensitive_keys": ["merged_cfg"],
+ "sys_info": sys_info,
+ "v1": {
+ "_beta_keys": ["subplatform"],
+ "availability-zone": "myaz",
+ "availability_zone": "myaz",
+ "cloud_id": "canonical_cloud_id",
+ "cloud-name": "subclasscloudname",
+ "cloud_name": "subclasscloudname",
+ "distro": "ubuntu",
+ "distro_release": "focal",
+ "distro_version": "20.04",
+ "instance-id": "iid-datasource",
+ "instance_id": "iid-datasource",
+ "local-hostname": "test-subclass-hostname",
+ "local_hostname": "test-subclass-hostname",
+ "kernel_release": "5.4.0-24-generic",
+ "machine": "x86_64",
+ "platform": "mytestsubclass",
+ "public_ssh_keys": [],
+ "python_version": "3.7",
+ "region": "myregion",
+ "system_platform": (
+ "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal"
+ ),
+ "subplatform": "unknown",
+ "variant": "ubuntu",
+ },
+ "ds": {
+ "_doc": EXPERIMENTAL_TEXT,
+ "meta_data": {
+ "availability_zone": "myaz",
+ "local-hostname": "test-subclass-hostname",
+ "region": "myregion",
+ },
+ },
+ }
+ self.assertEqual(expected, util.load_json(content))
+ file_stat = os.stat(json_file)
+ self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode))
+ self.assertEqual(expected, util.load_json(content))
+
+ def test_get_data_writes_redacted_public_json_instance_data(self):
+ """get_data writes redacted content to public INSTANCE_JSON_FILE."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg,
+ self.distro,
+ Paths({"run_dir": tmp}),
+ custom_metadata={
+ "availability_zone": "myaz",
+ "local-hostname": "test-subclass-hostname",
+ "region": "myregion",
+ "some": {
+ "security-credentials": {
+ "cred1": "sekret",
+ "cred2": "othersekret",
+ }
+ },
+ },
+ )
+ self.assertCountEqual(
+ (
+ "merged_cfg",
+ "security-credentials",
+ ),
+ datasource.sensitive_metadata_keys,
+ )
+ sys_info = {
+ "python": "3.7",
+ "platform": (
+ "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal"
+ ),
+ "uname": [
+ "Linux",
+ "myhost",
+ "5.4.0-24-generic",
+ "SMP blah",
+ "x86_64",
+ ],
+ "variant": "ubuntu",
+ "dist": ["ubuntu", "20.04", "focal"],
+ }
+ with mock.patch("cloudinit.util.system_info", return_value=sys_info):
+ datasource.get_data()
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ redacted = util.load_json(util.load_file(json_file))
+ expected = {
+ "base64_encoded_keys": [],
+ "merged_cfg": REDACT_SENSITIVE_VALUE,
+ "sensitive_keys": [
+ "ds/meta_data/some/security-credentials",
+ "merged_cfg",
+ ],
+ "sys_info": sys_info,
+ "v1": {
+ "_beta_keys": ["subplatform"],
+ "availability-zone": "myaz",
+ "availability_zone": "myaz",
+ "cloud-name": "subclasscloudname",
+ "cloud_name": "subclasscloudname",
+ "distro": "ubuntu",
+ "distro_release": "focal",
+ "distro_version": "20.04",
+ "instance-id": "iid-datasource",
+ "instance_id": "iid-datasource",
+ "local-hostname": "test-subclass-hostname",
+ "local_hostname": "test-subclass-hostname",
+ "kernel_release": "5.4.0-24-generic",
+ "machine": "x86_64",
+ "platform": "mytestsubclass",
+ "public_ssh_keys": [],
+ "python_version": "3.7",
+ "region": "myregion",
+ "system_platform": (
+ "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal"
+ ),
+ "subplatform": "unknown",
+ "variant": "ubuntu",
+ },
+ "ds": {
+ "_doc": EXPERIMENTAL_TEXT,
+ "meta_data": {
+ "availability_zone": "myaz",
+ "local-hostname": "test-subclass-hostname",
+ "region": "myregion",
+ "some": {"security-credentials": REDACT_SENSITIVE_VALUE},
+ },
+ },
+ }
+ self.assertCountEqual(expected, redacted)
+ file_stat = os.stat(json_file)
+ self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode))
+
+ def test_get_data_writes_json_instance_data_sensitive(self):
+ """
+ get_data writes unmodified data to sensitive file as root-readonly.
+ """
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg,
+ self.distro,
+ Paths({"run_dir": tmp}),
+ custom_metadata={
+ "availability_zone": "myaz",
+ "local-hostname": "test-subclass-hostname",
+ "region": "myregion",
+ "some": {
+ "security-credentials": {
+ "cred1": "sekret",
+ "cred2": "othersekret",
+ }
+ },
+ },
+ )
+ sys_info = {
+ "python": "3.7",
+ "platform": (
+ "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal"
+ ),
+ "uname": [
+ "Linux",
+ "myhost",
+ "5.4.0-24-generic",
+ "SMP blah",
+ "x86_64",
+ ],
+ "variant": "ubuntu",
+ "dist": ["ubuntu", "20.04", "focal"],
+ }
+
+ self.assertCountEqual(
+ (
+ "merged_cfg",
+ "security-credentials",
+ ),
+ datasource.sensitive_metadata_keys,
+ )
+ with mock.patch("cloudinit.util.system_info", return_value=sys_info):
+ with mock.patch(
+ "cloudinit.sources.canonical_cloud_id",
+ return_value="canonical-cloud-id",
+ ):
+ datasource.get_data()
+ sensitive_json_file = self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, tmp)
+ content = util.load_file(sensitive_json_file)
+ expected = {
+ "base64_encoded_keys": [],
+ "merged_cfg": {
+ "_doc": (
+ "Merged cloud-init system config from "
+ "/etc/cloud/cloud.cfg and /etc/cloud/cloud.cfg.d/"
+ ),
+ "datasource": {"_undef": {"key1": False}},
+ },
+ "sensitive_keys": [
+ "ds/meta_data/some/security-credentials",
+ "merged_cfg",
+ ],
+ "sys_info": sys_info,
+ "v1": {
+ "_beta_keys": ["subplatform"],
+ "availability-zone": "myaz",
+ "availability_zone": "myaz",
+ "cloud_id": "canonical-cloud-id",
+ "cloud-name": "subclasscloudname",
+ "cloud_name": "subclasscloudname",
+ "distro": "ubuntu",
+ "distro_release": "focal",
+ "distro_version": "20.04",
+ "instance-id": "iid-datasource",
+ "instance_id": "iid-datasource",
+ "kernel_release": "5.4.0-24-generic",
+ "local-hostname": "test-subclass-hostname",
+ "local_hostname": "test-subclass-hostname",
+ "machine": "x86_64",
+ "platform": "mytestsubclass",
+ "public_ssh_keys": [],
+ "python_version": "3.7",
+ "region": "myregion",
+ "subplatform": "unknown",
+ "system_platform": (
+ "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal"
+ ),
+ "variant": "ubuntu",
+ },
+ "ds": {
+ "_doc": EXPERIMENTAL_TEXT,
+ "meta_data": {
+ "availability_zone": "myaz",
+ "local-hostname": "test-subclass-hostname",
+ "region": "myregion",
+ "some": {
+ "security-credentials": {
+ "cred1": "sekret",
+ "cred2": "othersekret",
+ }
+ },
+ },
+ },
+ }
+ self.assertCountEqual(expected, util.load_json(content))
+ file_stat = os.stat(sensitive_json_file)
+ self.assertEqual(0o600, stat.S_IMODE(file_stat.st_mode))
+ self.assertEqual(expected, util.load_json(content))
+
+ def test_get_data_handles_redacted_unserializable_content(self):
+ """get_data warns unserializable content in INSTANCE_JSON_FILE."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg,
+ self.distro,
+ Paths({"run_dir": tmp}),
+ custom_metadata={"key1": "val1", "key2": {"key2.1": self.paths}},
+ )
+ datasource.get_data()
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ content = util.load_file(json_file)
+ expected_metadata = {
+ "key1": "val1",
+ "key2": {
+ "key2.1": (
+ "Warning: redacted unserializable type <class"
+ " 'cloudinit.helpers.Paths'>"
+ )
+ },
+ }
+ instance_json = util.load_json(content)
+ self.assertEqual(expected_metadata, instance_json["ds"]["meta_data"])
+
+ def test_persist_instance_data_writes_ec2_metadata_when_set(self):
+ """When ec2_metadata class attribute is set, persist to json."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
+ datasource.ec2_metadata = UNSET
+ datasource.get_data()
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ instance_data = util.load_json(util.load_file(json_file))
+ self.assertNotIn("ec2_metadata", instance_data["ds"])
+ datasource.ec2_metadata = {"ec2stuff": "is good"}
+ datasource.persist_instance_data()
+ instance_data = util.load_json(util.load_file(json_file))
+ self.assertEqual(
+ {"ec2stuff": "is good"}, instance_data["ds"]["ec2_metadata"]
+ )
+
+ def test_persist_instance_data_writes_canonical_cloud_id_and_symlink(self):
+ """canonical-cloud-id class attribute is set, persist to json."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
+ cloud_id_link = os.path.join(tmp, "cloud-id")
+ cloud_id_file = os.path.join(tmp, "cloud-id-my-cloud")
+ cloud_id2_file = os.path.join(tmp, "cloud-id-my-cloud2")
+ for filename in (cloud_id_file, cloud_id_link, cloud_id2_file):
+ self.assertFalse(
+ os.path.exists(filename), "Unexpected link found {filename}"
+ )
+ with mock.patch(
+ "cloudinit.sources.canonical_cloud_id", return_value="my-cloud"
+ ):
+ datasource.get_data()
+ self.assertEqual("my-cloud\n", util.load_file(cloud_id_link))
+ # A symlink with the generic /run/cloud-init/cloud-id link is present
+ self.assertTrue(util.is_link(cloud_id_link))
+ # When cloud-id changes, symlink and content change
+ with mock.patch(
+ "cloudinit.sources.canonical_cloud_id", return_value="my-cloud2"
+ ):
+ datasource.persist_instance_data()
+ self.assertEqual("my-cloud2\n", util.load_file(cloud_id2_file))
+ # Previous cloud-id-<cloud-type> file removed
+ self.assertFalse(os.path.exists(cloud_id_file))
+ # Generic link persisted which contains canonical-cloud-id as content
+ self.assertTrue(util.is_link(cloud_id_link))
+ self.assertEqual("my-cloud2\n", util.load_file(cloud_id_link))
+
+ def test_persist_instance_data_writes_network_json_when_set(self):
+ """When network_data.json class attribute is set, persist to json."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({"run_dir": tmp})
+ )
+ datasource.get_data()
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ instance_data = util.load_json(util.load_file(json_file))
+ self.assertNotIn("network_json", instance_data["ds"])
+ datasource.network_json = {"network_json": "is good"}
+ datasource.persist_instance_data()
+ instance_data = util.load_json(util.load_file(json_file))
+ self.assertEqual(
+ {"network_json": "is good"}, instance_data["ds"]["network_json"]
+ )
+
+ def test_get_data_base64encodes_unserializable_bytes(self):
+ """On py3, get_data base64encodes any unserializable content."""
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg,
+ self.distro,
+ Paths({"run_dir": tmp}),
+ custom_metadata={"key1": "val1", "key2": {"key2.1": b"\x123"}},
+ )
+ self.assertTrue(datasource.get_data())
+ json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+ content = util.load_file(json_file)
+ instance_json = util.load_json(content)
+ self.assertCountEqual(
+ ["ds/meta_data/key2/key2.1"], instance_json["base64_encoded_keys"]
+ )
+ self.assertEqual(
+ {"key1": "val1", "key2": {"key2.1": "EjM="}},
+ instance_json["ds"]["meta_data"],
+ )
+
+ def test_get_hostname_subclass_support(self):
+ """Validate get_hostname signature on all subclasses of DataSource."""
+ base_args = inspect.getfullargspec(DataSource.get_hostname)
+ # Import all DataSource subclasses so we can inspect them.
+ modules = util.find_modules(os.path.dirname(os.path.dirname(__file__)))
+ for _loc, name in modules.items():
+ mod_locs, _ = importer.find_module(name, ["cloudinit.sources"], [])
+ if mod_locs:
+ importer.import_module(mod_locs[0])
+ for child in DataSource.__subclasses__():
+ if "Test" in child.dsname:
+ continue
+ self.assertEqual(
+ base_args,
+ inspect.getfullargspec(child.get_hostname),
+ "%s does not implement DataSource.get_hostname params" % child,
+ )
+ for grandchild in child.__subclasses__():
+ self.assertEqual(
+ base_args,
+ inspect.getfullargspec(grandchild.get_hostname),
+ "%s does not implement DataSource.get_hostname params"
+ % grandchild,
+ )
+
+ def test_clear_cached_attrs_resets_cached_attr_class_attributes(self):
+ """Class attributes listed in cached_attr_defaults are reset."""
+ count = 0
+ # Setup values for all cached class attributes
+ for attr, value in self.datasource.cached_attr_defaults:
+ setattr(self.datasource, attr, count)
+ count += 1
+ self.datasource._dirty_cache = True
+ self.datasource.clear_cached_attrs()
+ for attr, value in self.datasource.cached_attr_defaults:
+ self.assertEqual(value, getattr(self.datasource, attr))
+
+ def test_clear_cached_attrs_noops_on_clean_cache(self):
+ """Class attributes listed in cached_attr_defaults are reset."""
+ count = 0
+ # Setup values for all cached class attributes
+ for attr, _ in self.datasource.cached_attr_defaults:
+ setattr(self.datasource, attr, count)
+ count += 1
+ self.datasource._dirty_cache = False # Fake clean cache
+ self.datasource.clear_cached_attrs()
+ count = 0
+ for attr, _ in self.datasource.cached_attr_defaults:
+ self.assertEqual(count, getattr(self.datasource, attr))
+ count += 1
+
+ def test_clear_cached_attrs_skips_non_attr_class_attributes(self):
+ """Skip any cached_attr_defaults which aren't class attributes."""
+ self.datasource._dirty_cache = True
+ self.datasource.clear_cached_attrs()
+ for attr in ("ec2_metadata", "network_json"):
+ self.assertFalse(hasattr(self.datasource, attr))
+
+ def test_clear_cached_attrs_of_custom_attrs(self):
+ """Custom attr_values can be passed to clear_cached_attrs."""
+ self.datasource._dirty_cache = True
+ cached_attr_name = self.datasource.cached_attr_defaults[0][0]
+ setattr(self.datasource, cached_attr_name, "himom")
+ self.datasource.myattr = "orig"
+ self.datasource.clear_cached_attrs(
+ attr_defaults=(("myattr", "updated"),)
+ )
+ self.assertEqual("himom", getattr(self.datasource, cached_attr_name))
+ self.assertEqual("updated", self.datasource.myattr)
+
+ @mock.patch.dict(
+ DataSource.default_update_events,
+ {EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}},
+ )
+ @mock.patch.dict(
+ DataSource.supported_update_events,
+ {EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}},
+ )
+ def test_update_metadata_only_acts_on_supported_update_events(self):
+ """update_metadata_if_supported wont get_data on unsupported events."""
+ self.assertEqual(
+ {EventScope.NETWORK: set([EventType.BOOT_NEW_INSTANCE])},
+ self.datasource.default_update_events,
+ )
+
+ def fake_get_data():
+ raise Exception("get_data should not be called")
+
+ self.datasource.get_data = fake_get_data
+ self.assertFalse(
+ self.datasource.update_metadata_if_supported(
+ source_event_types=[EventType.BOOT]
+ )
+ )
+
+ @mock.patch.dict(
+ DataSource.supported_update_events,
+ {EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}},
+ )
+ def test_update_metadata_returns_true_on_supported_update_event(self):
+ """update_metadata_if_supported returns get_data on supported events"""
+
+ def fake_get_data():
+ return True
+
+ self.datasource.get_data = fake_get_data
+ self.datasource._network_config = "something"
+ self.datasource._dirty_cache = True
+ self.assertTrue(
+ self.datasource.update_metadata_if_supported(
+ source_event_types=[
+ EventType.BOOT,
+ EventType.BOOT_NEW_INSTANCE,
+ ]
+ )
+ )
+ self.assertEqual(UNSET, self.datasource._network_config)
+
+ self.assertIn(
+ "DEBUG: Update datasource metadata and network config due to"
+ " events: boot-new-instance",
+ self.logs.getvalue(),
+ )
+
+
+class TestRedactSensitiveData(CiTestCase):
+ def test_redact_sensitive_data_noop_when_no_sensitive_keys_present(self):
+ """When sensitive_keys is absent or empty from metadata do nothing."""
+ md = {"my": "data"}
+ self.assertEqual(
+ md, redact_sensitive_keys(md, redact_value="redacted")
+ )
+ md["sensitive_keys"] = []
+ self.assertEqual(
+ md, redact_sensitive_keys(md, redact_value="redacted")
+ )
+
+ def test_redact_sensitive_data_redacts_exact_match_name(self):
+ """Only exact matched sensitive_keys are redacted from metadata."""
+ md = {
+ "sensitive_keys": ["md/secure"],
+ "md": {"secure": "s3kr1t", "insecure": "publik"},
+ }
+ secure_md = copy.deepcopy(md)
+ secure_md["md"]["secure"] = "redacted"
+ self.assertEqual(
+ secure_md, redact_sensitive_keys(md, redact_value="redacted")
+ )
+
+ def test_redact_sensitive_data_does_redacts_with_default_string(self):
+ """When redact_value is absent, REDACT_SENSITIVE_VALUE is used."""
+ md = {
+ "sensitive_keys": ["md/secure"],
+ "md": {"secure": "s3kr1t", "insecure": "publik"},
+ }
+ secure_md = copy.deepcopy(md)
+ secure_md["md"]["secure"] = "redacted for non-root user"
+ self.assertEqual(secure_md, redact_sensitive_keys(md))
+
+
+class TestCanonicalCloudID(CiTestCase):
+ def test_cloud_id_returns_platform_on_unknowns(self):
+ """When region and cloud_name are unknown, return platform."""
+ self.assertEqual(
+ "platform",
+ canonical_cloud_id(
+ cloud_name=METADATA_UNKNOWN,
+ region=METADATA_UNKNOWN,
+ platform="platform",
+ ),
+ )
+
+ def test_cloud_id_returns_platform_on_none(self):
+ """When region and cloud_name are unknown, return platform."""
+ self.assertEqual(
+ "platform",
+ canonical_cloud_id(
+ cloud_name=None, region=None, platform="platform"
+ ),
+ )
+
+ def test_cloud_id_returns_cloud_name_on_unknown_region(self):
+ """When region is unknown, return cloud_name."""
+ for region in (None, METADATA_UNKNOWN):
+ self.assertEqual(
+ "cloudname",
+ canonical_cloud_id(
+ cloud_name="cloudname", region=region, platform="platform"
+ ),
+ )
+
+ def test_cloud_id_returns_platform_on_unknown_cloud_name(self):
+ """When region is set but cloud_name is unknown return cloud_name."""
+ self.assertEqual(
+ "platform",
+ canonical_cloud_id(
+ cloud_name=METADATA_UNKNOWN,
+ region="region",
+ platform="platform",
+ ),
+ )
+
+ def test_cloud_id_aws_based_on_region_and_cloud_name(self):
+ """When cloud_name is aws, return proper cloud-id based on region."""
+ self.assertEqual(
+ "aws-china",
+ canonical_cloud_id(
+ cloud_name="aws", region="cn-north-1", platform="platform"
+ ),
+ )
+ self.assertEqual(
+ "aws",
+ canonical_cloud_id(
+ cloud_name="aws", region="us-east-1", platform="platform"
+ ),
+ )
+ self.assertEqual(
+ "aws-gov",
+ canonical_cloud_id(
+ cloud_name="aws", region="us-gov-1", platform="platform"
+ ),
+ )
+ self.assertEqual( # Overridden non-aws cloud_name is returned
+ "!aws",
+ canonical_cloud_id(
+ cloud_name="!aws", region="us-gov-1", platform="platform"
+ ),
+ )
+
+ def test_cloud_id_azure_based_on_region_and_cloud_name(self):
+ """Report cloud-id when cloud_name is azure and region is in china."""
+ self.assertEqual(
+ "azure-china",
+ canonical_cloud_id(
+ cloud_name="azure", region="chinaeast", platform="platform"
+ ),
+ )
+ self.assertEqual(
+ "azure",
+ canonical_cloud_id(
+ cloud_name="azure", region="!chinaeast", platform="platform"
+ ),
+ )
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_lxd.py b/tests/unittests/sources/test_lxd.py
new file mode 100644
index 00000000..e11c3746
--- /dev/null
+++ b/tests/unittests/sources/test_lxd.py
@@ -0,0 +1,394 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import json
+import re
+import stat
+from collections import namedtuple
+from copy import deepcopy
+from unittest import mock
+
+import pytest
+import yaml
+
+from cloudinit.sources import UNSET
+from cloudinit.sources import DataSourceLXD as lxd
+from cloudinit.sources import InvalidMetaDataException
+
+DS_PATH = "cloudinit.sources.DataSourceLXD."
+
+
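+# Minimal stand-in for os.lstat results; only st_mode is inspected by tests.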
+LStatResponse = namedtuple("lstatresponse", "st_mode")
+
+
+NETWORK_V1 = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "eth0",
+ "subnets": [{"type": "dhcp", "control": "auto"}],
+ }
+ ],
+}
+
+
+def _add_network_v1_device(devname) -> dict:
+ """Helper to inject device name into default network v1 config."""
+ network_cfg = deepcopy(NETWORK_V1)
+ network_cfg["config"][0]["name"] = devname
+ return network_cfg
+
+
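+# Canned metadata shaped like the dict lxd.read_metadata returns in the happy path.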
+LXD_V1_METADATA = {
+ "meta-data": "instance-id: my-lxc\nlocal-hostname: my-lxc\n\n",
+ "network-config": NETWORK_V1,
+ "user-data": "#cloud-config\npackages: [sl]\n",
+ "vendor-data": "#cloud-config\nruncmd: ['echo vendor-data']\n",
+ "config": {
+ "user.user-data": "instance-id: my-lxc\nlocal-hostname: my-lxc\n\n",
+ "user.vendor-data": "#cloud-config\nruncmd: ['echo vendor-data']\n",
+ "user.network-config": yaml.safe_dump(NETWORK_V1),
+ },
+}
+
+
+@pytest.fixture
+def lxd_metadata():
+ return LXD_V1_METADATA
+
+
+@pytest.fixture
+def lxd_ds(request, paths, lxd_metadata):
+ """
+ Return an instantiated DataSourceLXD.
+
+ This also performs the mocking required for the default test case:
+ * ``is_platform_viable`` returns True,
+ * ``read_metadata`` returns ``LXD_V1_METADATA``
+
+ (This uses the paths fixture for the required helpers.Paths object)
+ """
+ with mock.patch(DS_PATH + "is_platform_viable", return_value=True):
+ with mock.patch(DS_PATH + "read_metadata", return_value=lxd_metadata):
+ yield lxd.DataSourceLXD(
+ sys_cfg={}, distro=mock.Mock(), paths=paths
+ )
+
+
+class TestGenerateFallbackNetworkConfig:
+ @pytest.mark.parametrize(
+ "uname_machine,systemd_detect_virt,expected",
+ (
+ # systemd_detect_virt of None means the tool is absent; use default config
+ ({}, None, NETWORK_V1),
+ ({}, None, NETWORK_V1),
+ ("anything", "lxc\n", NETWORK_V1),
+ # `uname -m` on kvm determines devname
+ ("x86_64", "kvm\n", _add_network_v1_device("enp5s0")),
+ ("ppc64le", "kvm\n", _add_network_v1_device("enp0s5")),
+ ("s390x", "kvm\n", _add_network_v1_device("enc9")),
+ ),
+ )
+ @mock.patch(DS_PATH + "util.system_info")
+ @mock.patch(DS_PATH + "subp.subp")
+ @mock.patch(DS_PATH + "subp.which")
+ def test_net_v2_based_on_network_mode_virt_type_and_uname_machine(
+ self,
+ m_which,
+ m_subp,
+ m_system_info,
+ uname_machine,
+ systemd_detect_virt,
+ expected,
+ ):
+ """Return network config v2 based on uname -m, systemd-detect-virt."""
+ if systemd_detect_virt is None:
+ m_which.return_value = None
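+ # Index 4 of the uname tuple is the machine hardware name (uname -m).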
+ m_system_info.return_value = {"uname": ["", "", "", "", uname_machine]}
+ m_subp.return_value = (systemd_detect_virt, "")
+ assert expected == lxd.generate_fallback_network_config()
+ if systemd_detect_virt is None:
+ assert 0 == m_subp.call_count
+ assert 0 == m_system_info.call_count
+ else:
+ assert [
+ mock.call(["systemd-detect-virt"])
+ ] == m_subp.call_args_list
+ if systemd_detect_virt != "kvm\n":
+ assert 0 == m_system_info.call_count
+ else:
+ assert 1 == m_system_info.call_count
+
+
+class TestDataSourceLXD:
+ def test_platform_info(self, lxd_ds):
+ assert "LXD" == lxd_ds.dsname
+ assert "lxd" == lxd_ds.cloud_name
+ assert "lxd" == lxd_ds.platform_type
+
+ def test_subplatform(self, lxd_ds):
+ assert "LXD socket API v. 1.0 (/dev/lxd/sock)" == lxd_ds.subplatform
+
+ def test__get_data(self, lxd_ds):
+ """get_data calls read_metadata, setting appropiate instance attrs."""
+ assert UNSET == lxd_ds._crawled_metadata
+ assert UNSET == lxd_ds._network_config
+ assert None is lxd_ds.userdata_raw
+ assert True is lxd_ds._get_data()
+ assert LXD_V1_METADATA == lxd_ds._crawled_metadata
+ # network-config is dumped from YAML
+ assert NETWORK_V1 == lxd_ds._network_config
+ # Any user-data and vendor-data are saved as raw
+ assert LXD_V1_METADATA["user-data"] == lxd_ds.userdata_raw
+ assert LXD_V1_METADATA["vendor-data"] == lxd_ds.vendordata_raw
+
+
+class TestIsPlatformViable:
+ @pytest.mark.parametrize(
+ "exists,lstat_mode,expected",
+ (
+ (False, None, False),
+ (True, stat.S_IFREG, False),
+ (True, stat.S_IFSOCK, True),
+ ),
+ )
+ @mock.patch(DS_PATH + "os.lstat")
+ @mock.patch(DS_PATH + "os.path.exists")
+ def test_expected_viable(
+ self, m_exists, m_lstat, exists, lstat_mode, expected
+ ):
+ """Return True only when LXD_SOCKET_PATH exists and is a socket."""
+ m_exists.return_value = exists
+ m_lstat.return_value = LStatResponse(lstat_mode)
+ assert expected is lxd.is_platform_viable()
+ m_exists.assert_has_calls([mock.call(lxd.LXD_SOCKET_PATH)])
+ if exists:
+ m_lstat.assert_has_calls([mock.call(lxd.LXD_SOCKET_PATH)])
+ else:
+ assert 0 == m_lstat.call_count
+
+
+class TestReadMetadata:
+ @pytest.mark.parametrize(
+ "url_responses,expected,logs",
+ (
+ ( # Assert non-JSON format from config route
+ {
+ "http://lxd/1.0/meta-data": "local-hostname: md\n",
+ "http://lxd/1.0/config": "[NOT_JSON",
+ },
+ InvalidMetaDataException(
+ "Unable to determine cloud-init config from"
+ " http://lxd/1.0/config. Expected JSON but found:"
+ " [NOT_JSON"
+ ),
+ [
+ "[GET] [HTTP:200] http://lxd/1.0/meta-data",
+ "[GET] [HTTP:200] http://lxd/1.0/config",
+ ],
+ ),
+ ( # Assert success on just meta-data
+ {
+ "http://lxd/1.0/meta-data": "local-hostname: md\n",
+ "http://lxd/1.0/config": "[]",
+ },
+ {
+ "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION,
+ "config": {},
+ "meta-data": "local-hostname: md\n",
+ },
+ [
+ "[GET] [HTTP:200] http://lxd/1.0/meta-data",
+ "[GET] [HTTP:200] http://lxd/1.0/config",
+ ],
+ ),
+ ( # Assert 404s for config routes are logged as skipped
+ {
+ "http://lxd/1.0/meta-data": "local-hostname: md\n",
+ "http://lxd/1.0/config": (
+ '["/1.0/config/user.custom1",'
+ ' "/1.0/config/user.meta-data",'
+ ' "/1.0/config/user.network-config",'
+ ' "/1.0/config/user.user-data",'
+ ' "/1.0/config/user.vendor-data"]'
+ ),
+ "http://lxd/1.0/config/user.custom1": "custom1",
+ "http://lxd/1.0/config/user.meta-data": "", # 404
+ "http://lxd/1.0/config/user.network-config": "net-config",
+ "http://lxd/1.0/config/user.user-data": "", # 404
+ "http://lxd/1.0/config/user.vendor-data": "", # 404
+ },
+ {
+ "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION,
+ "config": {
+ "user.custom1": "custom1", # Not promoted
+ "user.network-config": "net-config",
+ },
+ "meta-data": "local-hostname: md\n",
+ "network-config": "net-config",
+ },
+ [
+ "Skipping http://lxd/1.0/config/user.vendor-data on"
+ " [HTTP:404]",
+ "Skipping http://lxd/1.0/config/user.meta-data on"
+ " [HTTP:404]",
+ "Skipping http://lxd/1.0/config/user.user-data on"
+ " [HTTP:404]",
+ "[GET] [HTTP:200] http://lxd/1.0/config",
+ "[GET] [HTTP:200] http://lxd/1.0/config/user.custom1",
+ "[GET] [HTTP:200]"
+ " http://lxd/1.0/config/user.network-config",
+ ],
+ ),
+ ( # Assert all CONFIG_KEY_ALIASES promoted to top-level keys
+ {
+ "http://lxd/1.0/meta-data": "local-hostname: md\n",
+ "http://lxd/1.0/config": (
+ '["/1.0/config/user.custom1",'
+ ' "/1.0/config/user.meta-data",'
+ ' "/1.0/config/user.network-config",'
+ ' "/1.0/config/user.user-data",'
+ ' "/1.0/config/user.vendor-data"]'
+ ),
+ "http://lxd/1.0/config/user.custom1": "custom1",
+ "http://lxd/1.0/config/user.meta-data": "meta-data",
+ "http://lxd/1.0/config/user.network-config": "net-config",
+ "http://lxd/1.0/config/user.user-data": "user-data",
+ "http://lxd/1.0/config/user.vendor-data": "vendor-data",
+ },
+ {
+ "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION,
+ "config": {
+ "user.custom1": "custom1", # Not promoted
+ "user.meta-data": "meta-data",
+ "user.network-config": "net-config",
+ "user.user-data": "user-data",
+ "user.vendor-data": "vendor-data",
+ },
+ "meta-data": "local-hostname: md\n",
+ "network-config": "net-config",
+ "user-data": "user-data",
+ "vendor-data": "vendor-data",
+ },
+ [
+ "[GET] [HTTP:200] http://lxd/1.0/meta-data",
+ "[GET] [HTTP:200] http://lxd/1.0/config",
+ "[GET] [HTTP:200] http://lxd/1.0/config/user.custom1",
+ "[GET] [HTTP:200] http://lxd/1.0/config/user.meta-data",
+ "[GET] [HTTP:200]"
+ " http://lxd/1.0/config/user.network-config",
+ "[GET] [HTTP:200] http://lxd/1.0/config/user.user-data",
+ "[GET] [HTTP:200] http://lxd/1.0/config/user.vendor-data",
+ ],
+ ),
+ ( # Assert cloud-init.* config key values preferred over user.*
+ {
+ "http://lxd/1.0/meta-data": "local-hostname: md\n",
+ "http://lxd/1.0/config": (
+ '["/1.0/config/user.meta-data",'
+ ' "/1.0/config/user.network-config",'
+ ' "/1.0/config/user.user-data",'
+ ' "/1.0/config/user.vendor-data",'
+ ' "/1.0/config/cloud-init.network-config",'
+ ' "/1.0/config/cloud-init.user-data",'
+ ' "/1.0/config/cloud-init.vendor-data"]'
+ ),
+ "http://lxd/1.0/config/user.meta-data": "user.meta-data",
+ "http://lxd/1.0/config/user.network-config": (
+ "user.network-config"
+ ),
+ "http://lxd/1.0/config/user.user-data": "user.user-data",
+ "http://lxd/1.0/config/user.vendor-data": (
+ "user.vendor-data"
+ ),
+ "http://lxd/1.0/config/cloud-init.meta-data": (
+ "cloud-init.meta-data"
+ ),
+ "http://lxd/1.0/config/cloud-init.network-config": (
+ "cloud-init.network-config"
+ ),
+ "http://lxd/1.0/config/cloud-init.user-data": (
+ "cloud-init.user-data"
+ ),
+ "http://lxd/1.0/config/cloud-init.vendor-data": (
+ "cloud-init.vendor-data"
+ ),
+ },
+ {
+ "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION,
+ "config": {
+ "user.meta-data": "user.meta-data",
+ "user.network-config": "user.network-config",
+ "user.user-data": "user.user-data",
+ "user.vendor-data": "user.vendor-data",
+ "cloud-init.network-config": (
+ "cloud-init.network-config"
+ ),
+ "cloud-init.user-data": "cloud-init.user-data",
+ "cloud-init.vendor-data": "cloud-init.vendor-data",
+ },
+ "meta-data": "local-hostname: md\n",
+ "network-config": "cloud-init.network-config",
+ "user-data": "cloud-init.user-data",
+ "vendor-data": "cloud-init.vendor-data",
+ },
+ [
+ "[GET] [HTTP:200] http://lxd/1.0/meta-data",
+ "[GET] [HTTP:200] http://lxd/1.0/config",
+ "[GET] [HTTP:200] http://lxd/1.0/config/user.meta-data",
+ "[GET] [HTTP:200]"
+ " http://lxd/1.0/config/user.network-config",
+ "[GET] [HTTP:200] http://lxd/1.0/config/user.user-data",
+ "[GET] [HTTP:200] http://lxd/1.0/config/user.vendor-data",
+ "[GET] [HTTP:200]"
+ " http://lxd/1.0/config/cloud-init.network-config",
+ "[GET] [HTTP:200]"
+ " http://lxd/1.0/config/cloud-init.user-data",
+ "[GET] [HTTP:200]"
+ " http://lxd/1.0/config/cloud-init.vendor-data",
+ "Ignoring LXD config user.user-data in favor of"
+ " cloud-init.user-data value.",
+ "Ignoring LXD config user.network-config in favor of"
+ " cloud-init.network-config value.",
+ "Ignoring LXD config user.vendor-data in favor of"
+ " cloud-init.vendor-data value.",
+ ],
+ ),
+ ),
+ )
+ @mock.patch.object(lxd.requests.Session, "get")
+ def test_read_metadata_handles_unexpected_content_or_http_status(
+ self, session_get, url_responses, expected, logs, caplog
+ ):
+ """read_metadata handles valid and invalid content and status codes."""
+
+ def fake_get(url):
+ """Mock Response json, ok, status_code, text from url_responses."""
+ m_resp = mock.MagicMock()
+ content = url_responses.get(url, "")
+ m_resp.json.side_effect = lambda: json.loads(content)
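+ # Empty canned content simulates a 404; non-empty content reports HTTP 200.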
+ if content:
+ mock_ok = mock.PropertyMock(return_value=True)
+ mock_status_code = mock.PropertyMock(return_value=200)
+ else:
+ mock_ok = mock.PropertyMock(return_value=False)
+ mock_status_code = mock.PropertyMock(return_value=404)
+ type(m_resp).ok = mock_ok
+ type(m_resp).status_code = mock_status_code
+ mock_text = mock.PropertyMock(return_value=content)
+ type(m_resp).text = mock_text
+ return m_resp
+
+ session_get.side_effect = fake_get
+
+ if isinstance(expected, Exception):
+ with pytest.raises(type(expected), match=re.escape(str(expected))):
+ lxd.read_metadata()
+ else:
+ assert expected == lxd.read_metadata()
+ caplogs = caplog.text
+ for log in logs:
+ assert log in caplogs
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/sources/test_maas.py
index 41b6c27b..e95ba374 100644
--- a/tests/unittests/test_datasource/test_maas.py
+++ b/tests/unittests/sources/test_maas.py
@@ -1,19 +1,19 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from copy import copy
import os
import shutil
import tempfile
-import yaml
+from copy import copy
from unittest import mock
-from cloudinit.sources import DataSourceMAAS
+import yaml
+
from cloudinit import url_helper
-from cloudinit.tests.helpers import CiTestCase, populate_dir
+from cloudinit.sources import DataSourceMAAS
+from tests.unittests.helpers import CiTestCase, populate_dir
class TestMAASDataSource(CiTestCase):
-
def setUp(self):
super(TestMAASDataSource, self).setUp()
# Make a temp directory for tests to use.
@@ -23,11 +23,13 @@ class TestMAASDataSource(CiTestCase):
def test_seed_dir_valid(self):
"""Verify a valid seeddir is read as such."""
- userdata = b'valid01-userdata'
- data = {'meta-data/instance-id': 'i-valid01',
- 'meta-data/local-hostname': 'valid01-hostname',
- 'user-data': userdata,
- 'public-keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname'}
+ userdata = b"valid01-userdata"
+ data = {
+ "meta-data/instance-id": "i-valid01",
+ "meta-data/local-hostname": "valid01-hostname",
+ "user-data": userdata,
+ "public-keys": "ssh-rsa AAAAB3Nz...aC1yc2E= keyname",
+ }
my_d = os.path.join(self.tmp, "valid")
populate_dir(my_d, data)
@@ -35,20 +37,23 @@ class TestMAASDataSource(CiTestCase):
ud, md, vd = DataSourceMAAS.read_maas_seed_dir(my_d)
self.assertEqual(userdata, ud)
- for key in ('instance-id', 'local-hostname'):
+ for key in ("instance-id", "local-hostname"):
self.assertEqual(data["meta-data/" + key], md[key])
# verify that 'userdata' is not returned as part of the metadata
- self.assertFalse(('user-data' in md))
+ self.assertFalse(("user-data" in md))
self.assertIsNone(vd)
def test_seed_dir_valid_extra(self):
"""Verify extra files do not affect seed_dir validity."""
- userdata = b'valid-extra-userdata'
- data = {'meta-data/instance-id': 'i-valid-extra',
- 'meta-data/local-hostname': 'valid-extra-hostname',
- 'user-data': userdata, 'foo': 'bar'}
+ userdata = b"valid-extra-userdata"
+ data = {
+ "meta-data/instance-id": "i-valid-extra",
+ "meta-data/local-hostname": "valid-extra-hostname",
+ "user-data": userdata,
+ "foo": "bar",
+ }
my_d = os.path.join(self.tmp, "valid_extra")
populate_dir(my_d, data)
@@ -56,62 +61,77 @@ class TestMAASDataSource(CiTestCase):
ud, md, _vd = DataSourceMAAS.read_maas_seed_dir(my_d)
self.assertEqual(userdata, ud)
- for key in ('instance-id', 'local-hostname'):
- self.assertEqual(data['meta-data/' + key], md[key])
+ for key in ("instance-id", "local-hostname"):
+ self.assertEqual(data["meta-data/" + key], md[key])
# additional files should not just appear as keys in metadata atm
- self.assertFalse(('foo' in md))
+ self.assertFalse(("foo" in md))
def test_seed_dir_invalid(self):
"""Verify that invalid seed_dir raises MAASSeedDirMalformed."""
- valid = {'instance-id': 'i-instanceid',
- 'local-hostname': 'test-hostname', 'user-data': ''}
+ valid = {
+ "instance-id": "i-instanceid",
+ "local-hostname": "test-hostname",
+ "user-data": "",
+ }
my_based = os.path.join(self.tmp, "valid_extra")
# missing 'local-hostname' entry
my_d = "%s-01" % my_based
invalid_data = copy(valid)
- del invalid_data['local-hostname']
+ del invalid_data["local-hostname"]
populate_dir(my_d, invalid_data)
- self.assertRaises(DataSourceMAAS.MAASSeedDirMalformed,
- DataSourceMAAS.read_maas_seed_dir, my_d)
+ self.assertRaises(
+ DataSourceMAAS.MAASSeedDirMalformed,
+ DataSourceMAAS.read_maas_seed_dir,
+ my_d,
+ )
# missing 'instance-id'
my_d = "%s-02" % my_based
invalid_data = copy(valid)
- del invalid_data['instance-id']
+ del invalid_data["instance-id"]
populate_dir(my_d, invalid_data)
- self.assertRaises(DataSourceMAAS.MAASSeedDirMalformed,
- DataSourceMAAS.read_maas_seed_dir, my_d)
+ self.assertRaises(
+ DataSourceMAAS.MAASSeedDirMalformed,
+ DataSourceMAAS.read_maas_seed_dir,
+ my_d,
+ )
def test_seed_dir_none(self):
"""Verify that empty seed_dir raises MAASSeedDirNone."""
my_d = os.path.join(self.tmp, "valid_empty")
- self.assertRaises(DataSourceMAAS.MAASSeedDirNone,
- DataSourceMAAS.read_maas_seed_dir, my_d)
+ self.assertRaises(
+ DataSourceMAAS.MAASSeedDirNone,
+ DataSourceMAAS.read_maas_seed_dir,
+ my_d,
+ )
def test_seed_dir_missing(self):
"""Verify that missing seed_dir raises MAASSeedDirNone."""
- self.assertRaises(DataSourceMAAS.MAASSeedDirNone,
- DataSourceMAAS.read_maas_seed_dir,
- os.path.join(self.tmp, "nonexistantdirectory"))
+ self.assertRaises(
+ DataSourceMAAS.MAASSeedDirNone,
+ DataSourceMAAS.read_maas_seed_dir,
+ os.path.join(self.tmp, "nonexistantdirectory"),
+ )
def mock_read_maas_seed_url(self, data, seed, version="19991231"):
"""mock up readurl to appear as a web server at seed has provided data.
return what read_maas_seed_url returns."""
+
def my_readurl(*args, **kwargs):
if len(args):
url = args[0]
else:
- url = kwargs['url']
+ url = kwargs["url"]
prefix = "%s/%s/" % (seed, version)
if not url.startswith(prefix):
raise ValueError("unexpected call %s" % url)
- short = url[len(prefix):]
+ short = url[len(prefix) :]
if short not in data:
raise url_helper.UrlError("not found", code=404, url=url)
return url_helper.StringResponse(data[short])
@@ -124,44 +144,48 @@ class TestMAASDataSource(CiTestCase):
def test_seed_url_valid(self):
"""Verify that valid seed_url is read as such."""
valid = {
- 'meta-data/instance-id': 'i-instanceid',
- 'meta-data/local-hostname': 'test-hostname',
- 'meta-data/public-keys': 'test-hostname',
- 'meta-data/vendor-data': b'my-vendordata',
- 'user-data': b'foodata',
+ "meta-data/instance-id": "i-instanceid",
+ "meta-data/local-hostname": "test-hostname",
+ "meta-data/public-keys": "test-hostname",
+ "meta-data/vendor-data": b"my-vendordata",
+ "user-data": b"foodata",
}
my_seed = "http://example.com/xmeta"
my_ver = "1999-99-99"
ud, md, vd = self.mock_read_maas_seed_url(valid, my_seed, my_ver)
- self.assertEqual(valid['meta-data/instance-id'], md['instance-id'])
+ self.assertEqual(valid["meta-data/instance-id"], md["instance-id"])
self.assertEqual(
- valid['meta-data/local-hostname'], md['local-hostname'])
- self.assertEqual(valid['meta-data/public-keys'], md['public-keys'])
- self.assertEqual(valid['user-data'], ud)
+ valid["meta-data/local-hostname"], md["local-hostname"]
+ )
+ self.assertEqual(valid["meta-data/public-keys"], md["public-keys"])
+ self.assertEqual(valid["user-data"], ud)
# vendor-data is YAML, which decodes to a string
- self.assertEqual(valid['meta-data/vendor-data'].decode(), vd)
+ self.assertEqual(valid["meta-data/vendor-data"].decode(), vd)
def test_seed_url_vendor_data_dict(self):
- expected_vd = {'key1': 'value1'}
+ expected_vd = {"key1": "value1"}
valid = {
- 'meta-data/instance-id': 'i-instanceid',
- 'meta-data/local-hostname': 'test-hostname',
- 'meta-data/vendor-data': yaml.safe_dump(expected_vd).encode(),
+ "meta-data/instance-id": "i-instanceid",
+ "meta-data/local-hostname": "test-hostname",
+ "meta-data/vendor-data": yaml.safe_dump(expected_vd).encode(),
}
_ud, md, vd = self.mock_read_maas_seed_url(
- valid, "http://example.com/foo")
+ valid, "http://example.com/foo"
+ )
- self.assertEqual(valid['meta-data/instance-id'], md['instance-id'])
+ self.assertEqual(valid["meta-data/instance-id"], md["instance-id"])
self.assertEqual(expected_vd, vd)
@mock.patch("cloudinit.sources.DataSourceMAAS.url_helper.OauthUrlHelper")
class TestGetOauthHelper(CiTestCase):
- base_cfg = {'consumer_key': 'FAKE_CONSUMER_KEY',
- 'token_key': 'FAKE_TOKEN_KEY',
- 'token_secret': 'FAKE_TOKEN_SECRET',
- 'consumer_secret': None}
+ base_cfg = {
+ "consumer_key": "FAKE_CONSUMER_KEY",
+ "token_key": "FAKE_TOKEN_KEY",
+ "token_secret": "FAKE_TOKEN_SECRET",
+ "consumer_secret": None,
+ }
def test_all_required(self, m_helper):
"""Valid config as expected."""
@@ -171,17 +195,20 @@ class TestGetOauthHelper(CiTestCase):
def test_other_fields_not_passed_through(self, m_helper):
"""Only relevant fields are passed through."""
mycfg = self.base_cfg.copy()
- mycfg['unrelated_field'] = 'unrelated'
+ mycfg["unrelated_field"] = "unrelated"
DataSourceMAAS.get_oauth_helper(mycfg)
m_helper.assert_has_calls([mock.call(**self.base_cfg)])
class TestGetIdHash(CiTestCase):
- v1_cfg = {'consumer_key': 'CKEY', 'token_key': 'TKEY',
- 'token_secret': 'TSEC'}
+ v1_cfg = {
+ "consumer_key": "CKEY",
+ "token_key": "TKEY",
+ "token_secret": "TSEC",
+ }
v1_id = (
- 'v1:'
- '403ee5f19c956507f1d0e50814119c405902137ea4f8838bde167c5da8110392')
+ "v1:403ee5f19c956507f1d0e50814119c405902137ea4f8838bde167c5da8110392"
+ )
def test_v1_expected(self):
"""Test v1 id generated as expected working behavior from config."""
@@ -191,8 +218,8 @@ class TestGetIdHash(CiTestCase):
def test_v1_extra_fields_are_ignored(self):
"""Test v1 id ignores unused entries in config."""
cfg = self.v1_cfg.copy()
- cfg['consumer_secret'] = "BOO"
- cfg['unrelated'] = "HI MOM"
+ cfg["consumer_secret"] = "BOO"
+ cfg["unrelated"] = "HI MOM"
result = DataSourceMAAS.get_id_from_ds_cfg(cfg)
self.assertEqual(self.v1_id, result)
diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/sources/test_nocloud.py
index 02cc9b38..1f6b722d 100644
--- a/tests/unittests/test_datasource/test_nocloud.py
+++ b/tests/unittests/sources/test_nocloud.py
@@ -1,27 +1,27 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import dmi
-from cloudinit import helpers
-from cloudinit.sources.DataSourceNoCloud import (
- DataSourceNoCloud as dsNoCloud,
- _maybe_remove_top_network,
- parse_cmdline_data)
-from cloudinit import util
-from cloudinit.tests.helpers import CiTestCase, populate_dir, mock, ExitStack
-
import os
import textwrap
+
import yaml
+from cloudinit import dmi, helpers, util
+from cloudinit.sources.DataSourceNoCloud import DataSourceNoCloud as dsNoCloud
+from cloudinit.sources.DataSourceNoCloud import (
+ _maybe_remove_top_network,
+ parse_cmdline_data,
+)
+from tests.unittests.helpers import CiTestCase, ExitStack, mock, populate_dir
+
-@mock.patch('cloudinit.sources.DataSourceNoCloud.util.is_lxd')
+@mock.patch("cloudinit.sources.DataSourceNoCloud.util.is_lxd")
class TestNoCloudDataSource(CiTestCase):
-
def setUp(self):
super(TestNoCloudDataSource, self).setUp()
self.tmp = self.tmp_dir()
self.paths = helpers.Paths(
- {'cloud_dir': self.tmp, 'run_dir': self.tmp})
+ {"cloud_dir": self.tmp, "run_dir": self.tmp}
+ )
self.cmdline = "root=TESTCMDLINE"
@@ -29,77 +29,77 @@ class TestNoCloudDataSource(CiTestCase):
self.addCleanup(self.mocks.close)
self.mocks.enter_context(
- mock.patch.object(util, 'get_cmdline', return_value=self.cmdline))
+ mock.patch.object(util, "get_cmdline", return_value=self.cmdline)
+ )
self.mocks.enter_context(
- mock.patch.object(dmi, 'read_dmi_data', return_value=None))
+ mock.patch.object(dmi, "read_dmi_data", return_value=None)
+ )
def _test_fs_config_is_read(self, fs_label, fs_label_to_search):
- vfat_device = 'device-1'
+ vfat_device = "device-1"
def m_mount_cb(device, callback, mtype):
- if (device == vfat_device):
- return {'meta-data': yaml.dump({'instance-id': 'IID'})}
+ if device == vfat_device:
+ return {"meta-data": yaml.dump({"instance-id": "IID"})}
else:
return {}
- def m_find_devs_with(query='', path=''):
- if 'TYPE=vfat' == query:
+ def m_find_devs_with(query="", path=""):
+ if "TYPE=vfat" == query:
return [vfat_device]
- elif 'LABEL={}'.format(fs_label) == query:
+ elif "LABEL={}".format(fs_label) == query:
return [vfat_device]
else:
return []
self.mocks.enter_context(
- mock.patch.object(util, 'find_devs_with',
- side_effect=m_find_devs_with))
+ mock.patch.object(
+ util, "find_devs_with", side_effect=m_find_devs_with
+ )
+ )
self.mocks.enter_context(
- mock.patch.object(util, 'mount_cb',
- side_effect=m_mount_cb))
- sys_cfg = {'datasource': {'NoCloud': {'fs_label': fs_label_to_search}}}
+ mock.patch.object(util, "mount_cb", side_effect=m_mount_cb)
+ )
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": fs_label_to_search}}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
- self.assertEqual(dsrc.metadata.get('instance-id'), 'IID')
+ self.assertEqual(dsrc.metadata.get("instance-id"), "IID")
self.assertTrue(ret)
def test_nocloud_seed_dir_on_lxd(self, m_is_lxd):
- md = {'instance-id': 'IID', 'dsmode': 'local'}
+ md = {"instance-id": "IID", "dsmode": "local"}
ud = b"USER_DATA_HERE"
seed_dir = os.path.join(self.paths.seed_dir, "nocloud")
- populate_dir(seed_dir,
- {'user-data': ud, 'meta-data': yaml.safe_dump(md)})
+ populate_dir(
+ seed_dir, {"user-data": ud, "meta-data": yaml.safe_dump(md)}
+ )
- sys_cfg = {
- 'datasource': {'NoCloud': {'fs_label': None}}
- }
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
self.assertEqual(dsrc.userdata_raw, ud)
self.assertEqual(dsrc.metadata, md)
- self.assertEqual(dsrc.platform_type, 'lxd')
- self.assertEqual(
- dsrc.subplatform, 'seed-dir (%s)' % seed_dir)
+ self.assertEqual(dsrc.platform_type, "lxd")
+ self.assertEqual(dsrc.subplatform, "seed-dir (%s)" % seed_dir)
self.assertTrue(ret)
def test_nocloud_seed_dir_non_lxd_platform_is_nocloud(self, m_is_lxd):
"""Non-lxd environments will list nocloud as the platform."""
m_is_lxd.return_value = False
- md = {'instance-id': 'IID', 'dsmode': 'local'}
+ md = {"instance-id": "IID", "dsmode": "local"}
seed_dir = os.path.join(self.paths.seed_dir, "nocloud")
- populate_dir(seed_dir,
- {'user-data': '', 'meta-data': yaml.safe_dump(md)})
+ populate_dir(
+ seed_dir, {"user-data": "", "meta-data": yaml.safe_dump(md)}
+ )
- sys_cfg = {
- 'datasource': {'NoCloud': {'fs_label': None}}
- }
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
self.assertTrue(dsrc.get_data())
- self.assertEqual(dsrc.platform_type, 'nocloud')
- self.assertEqual(
- dsrc.subplatform, 'seed-dir (%s)' % seed_dir)
+ self.assertEqual(dsrc.platform_type, "nocloud")
+ self.assertEqual(dsrc.subplatform, "seed-dir (%s)" % seed_dir)
def test_fs_label(self, m_is_lxd):
# find_devs_with should not be called if fs_label is None
@@ -107,65 +107,70 @@ class TestNoCloudDataSource(CiTestCase):
pass
self.mocks.enter_context(
- mock.patch.object(util, 'find_devs_with',
- side_effect=PsuedoException))
+ mock.patch.object(
+ util, "find_devs_with", side_effect=PsuedoException
+ )
+ )
# by default, NoCloud should search for filesystems by label
- sys_cfg = {'datasource': {'NoCloud': {}}}
+ sys_cfg = {"datasource": {"NoCloud": {}}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
self.assertRaises(PsuedoException, dsrc.get_data)
# but disabling searching should just end up with None found
- sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
self.assertFalse(ret)
def test_fs_config_lowercase_label(self, m_is_lxd):
- self._test_fs_config_is_read('cidata', 'cidata')
+ self._test_fs_config_is_read("cidata", "cidata")
def test_fs_config_uppercase_label(self, m_is_lxd):
- self._test_fs_config_is_read('CIDATA', 'cidata')
+ self._test_fs_config_is_read("CIDATA", "cidata")
def test_fs_config_lowercase_label_search_uppercase(self, m_is_lxd):
- self._test_fs_config_is_read('cidata', 'CIDATA')
+ self._test_fs_config_is_read("cidata", "CIDATA")
def test_fs_config_uppercase_label_search_uppercase(self, m_is_lxd):
- self._test_fs_config_is_read('CIDATA', 'CIDATA')
+ self._test_fs_config_is_read("CIDATA", "CIDATA")
def test_no_datasource_expected(self, m_is_lxd):
# no source should be found if no cmdline, config, and fs_label=None
- sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
self.assertFalse(dsrc.get_data())
def test_seed_in_config(self, m_is_lxd):
data = {
- 'fs_label': None,
- 'meta-data': yaml.safe_dump({'instance-id': 'IID'}),
- 'user-data': b"USER_DATA_RAW",
+ "fs_label": None,
+ "meta-data": yaml.safe_dump({"instance-id": "IID"}),
+ "user-data": b"USER_DATA_RAW",
}
- sys_cfg = {'datasource': {'NoCloud': data}}
+ sys_cfg = {"datasource": {"NoCloud": data}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
self.assertEqual(dsrc.userdata_raw, b"USER_DATA_RAW")
- self.assertEqual(dsrc.metadata.get('instance-id'), 'IID')
+ self.assertEqual(dsrc.metadata.get("instance-id"), "IID")
self.assertTrue(ret)
def test_nocloud_seed_with_vendordata(self, m_is_lxd):
- md = {'instance-id': 'IID', 'dsmode': 'local'}
+ md = {"instance-id": "IID", "dsmode": "local"}
ud = b"USER_DATA_HERE"
vd = b"THIS IS MY VENDOR_DATA"
- populate_dir(os.path.join(self.paths.seed_dir, "nocloud"),
- {'user-data': ud, 'meta-data': yaml.safe_dump(md),
- 'vendor-data': vd})
+ populate_dir(
+ os.path.join(self.paths.seed_dir, "nocloud"),
+ {
+ "user-data": ud,
+ "meta-data": yaml.safe_dump(md),
+ "vendor-data": vd,
+ },
+ )
- sys_cfg = {
- 'datasource': {'NoCloud': {'fs_label': None}}
- }
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
@@ -175,10 +180,12 @@ class TestNoCloudDataSource(CiTestCase):
self.assertTrue(ret)
def test_nocloud_no_vendordata(self, m_is_lxd):
- populate_dir(os.path.join(self.paths.seed_dir, "nocloud"),
- {'user-data': b"ud", 'meta-data': "instance-id: IID\n"})
+ populate_dir(
+ os.path.join(self.paths.seed_dir, "nocloud"),
+ {"user-data": b"ud", "meta-data": "instance-id: IID\n"},
+ )
- sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
@@ -189,23 +196,28 @@ class TestNoCloudDataSource(CiTestCase):
def test_metadata_network_interfaces(self, m_is_lxd):
gateway = "103.225.10.1"
md = {
- 'instance-id': 'i-abcd',
- 'local-hostname': 'hostname1',
- 'network-interfaces': textwrap.dedent("""\
+ "instance-id": "i-abcd",
+ "local-hostname": "hostname1",
+ "network-interfaces": textwrap.dedent(
+ """\
auto eth0
iface eth0 inet static
hwaddr 00:16:3e:70:e1:04
address 103.225.10.12
netmask 255.255.255.0
- gateway """ + gateway + """
- dns-servers 8.8.8.8""")}
+ gateway """
+ + gateway
+ + """
+ dns-servers 8.8.8.8"""
+ ),
+ }
populate_dir(
os.path.join(self.paths.seed_dir, "nocloud"),
- {'user-data': b"ud",
- 'meta-data': yaml.dump(md) + "\n"})
+ {"user-data": b"ud", "meta-data": yaml.dump(md) + "\n"},
+ )
- sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
@@ -215,16 +227,26 @@ class TestNoCloudDataSource(CiTestCase):
def test_metadata_network_config(self, m_is_lxd):
# network-config needs to get into network_config
- netconf = {'version': 1,
- 'config': [{'type': 'physical', 'name': 'interface0',
- 'subnets': [{'type': 'dhcp'}]}]}
+ netconf = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "interface0",
+ "subnets": [{"type": "dhcp"}],
+ }
+ ],
+ }
populate_dir(
os.path.join(self.paths.seed_dir, "nocloud"),
- {'user-data': b"ud",
- 'meta-data': "instance-id: IID\n",
- 'network-config': yaml.dump(netconf) + "\n"})
+ {
+ "user-data": b"ud",
+ "meta-data": "instance-id: IID\n",
+ "network-config": yaml.dump(netconf) + "\n",
+ },
+ )
- sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
@@ -233,14 +255,17 @@ class TestNoCloudDataSource(CiTestCase):
def test_metadata_network_config_with_toplevel_network(self, m_is_lxd):
"""network-config may have 'network' top level key."""
- netconf = {'config': 'disabled'}
+ netconf = {"config": "disabled"}
populate_dir(
os.path.join(self.paths.seed_dir, "nocloud"),
- {'user-data': b"ud",
- 'meta-data': "instance-id: IID\n",
- 'network-config': yaml.dump({'network': netconf}) + "\n"})
+ {
+ "user-data": b"ud",
+ "meta-data": "instance-id: IID\n",
+ "network-config": yaml.dump({"network": netconf}) + "\n",
+ },
+ )
- sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
@@ -251,27 +276,42 @@ class TestNoCloudDataSource(CiTestCase):
# network-config should override meta-data/network-interfaces
gateway = "103.225.10.1"
md = {
- 'instance-id': 'i-abcd',
- 'local-hostname': 'hostname1',
- 'network-interfaces': textwrap.dedent("""\
+ "instance-id": "i-abcd",
+ "local-hostname": "hostname1",
+ "network-interfaces": textwrap.dedent(
+ """\
auto eth0
iface eth0 inet static
hwaddr 00:16:3e:70:e1:04
address 103.225.10.12
netmask 255.255.255.0
- gateway """ + gateway + """
- dns-servers 8.8.8.8""")}
+ gateway """
+ + gateway
+ + """
+ dns-servers 8.8.8.8"""
+ ),
+ }
- netconf = {'version': 1,
- 'config': [{'type': 'physical', 'name': 'interface0',
- 'subnets': [{'type': 'dhcp'}]}]}
+ netconf = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "interface0",
+ "subnets": [{"type": "dhcp"}],
+ }
+ ],
+ }
populate_dir(
os.path.join(self.paths.seed_dir, "nocloud"),
- {'user-data': b"ud",
- 'meta-data': yaml.dump(md) + "\n",
- 'network-config': yaml.dump(netconf) + "\n"})
+ {
+ "user-data": b"ud",
+ "meta-data": yaml.dump(md) + "\n",
+ "network-config": yaml.dump(netconf) + "\n",
+ },
+ )
- sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
@@ -281,17 +321,24 @@ class TestNoCloudDataSource(CiTestCase):
@mock.patch("cloudinit.util.blkid")
def test_nocloud_get_devices_freebsd(self, m_is_lxd, fake_blkid):
- populate_dir(os.path.join(self.paths.seed_dir, "nocloud"),
- {'user-data': b"ud", 'meta-data': "instance-id: IID\n"})
+ populate_dir(
+ os.path.join(self.paths.seed_dir, "nocloud"),
+ {"user-data": b"ud", "meta-data": "instance-id: IID\n"},
+ )
- sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
+ sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
self.mocks.enter_context(
- mock.patch.object(util, 'is_FreeBSD', return_value=True))
+ mock.patch.object(util, "is_FreeBSD", return_value=True)
+ )
def _mfind_devs_with_freebsd(
- criteria=None, oformat='device',
- tag=None, no_cache=False, path=None):
+ criteria=None,
+ oformat="device",
+ tag=None,
+ no_cache=False,
+ path=None,
+ ):
if not criteria:
return ["/dev/msdosfs/foo", "/dev/iso9660/foo"]
if criteria.startswith("LABEL="):
@@ -304,17 +351,19 @@ class TestNoCloudDataSource(CiTestCase):
self.mocks.enter_context(
mock.patch.object(
- util, 'find_devs_with_freebsd',
- side_effect=_mfind_devs_with_freebsd))
+ util,
+ "find_devs_with_freebsd",
+ side_effect=_mfind_devs_with_freebsd,
+ )
+ )
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
- ret = dsrc._get_devices('foo')
- self.assertEqual(['/dev/msdosfs/foo', '/dev/iso9660/foo'], ret)
+ ret = dsrc._get_devices("foo")
+ self.assertEqual(["/dev/msdosfs/foo", "/dev/iso9660/foo"], ret)
fake_blkid.assert_not_called()
class TestParseCommandLineData(CiTestCase):
-
def test_parse_cmdline_data_valid(self):
ds_id = "ds=nocloud"
pairs = (
@@ -322,18 +371,21 @@ class TestParseCommandLineData(CiTestCase):
("%(ds_id)s; root=/dev/foo", {}),
("%(ds_id)s", {}),
("%(ds_id)s;", {}),
- ("%(ds_id)s;s=SEED", {'seedfrom': 'SEED'}),
- ("%(ds_id)s;seedfrom=SEED;local-hostname=xhost",
- {'seedfrom': 'SEED', 'local-hostname': 'xhost'}),
- ("%(ds_id)s;h=xhost",
- {'local-hostname': 'xhost'}),
- ("%(ds_id)s;h=xhost;i=IID",
- {'local-hostname': 'xhost', 'instance-id': 'IID'}),
+ ("%(ds_id)s;s=SEED", {"seedfrom": "SEED"}),
+ (
+ "%(ds_id)s;seedfrom=SEED;local-hostname=xhost",
+ {"seedfrom": "SEED", "local-hostname": "xhost"},
+ ),
+ ("%(ds_id)s;h=xhost", {"local-hostname": "xhost"}),
+ (
+ "%(ds_id)s;h=xhost;i=IID",
+ {"local-hostname": "xhost", "instance-id": "IID"},
+ ),
)
for (fmt, expected) in pairs:
fill = {}
- cmdline = fmt % {'ds_id': ds_id}
+ cmdline = fmt % {"ds_id": ds_id}
ret = parse_cmdline_data(ds_id=ds_id, fill=fill, cmdline=cmdline)
self.assertEqual(expected, fill)
self.assertTrue(ret)
@@ -358,36 +410,44 @@ class TestParseCommandLineData(CiTestCase):
class TestMaybeRemoveToplevelNetwork(CiTestCase):
"""test _maybe_remove_top_network function."""
- basecfg = [{'type': 'physical', 'name': 'interface0',
- 'subnets': [{'type': 'dhcp'}]}]
+
+ basecfg = [
+ {
+ "type": "physical",
+ "name": "interface0",
+ "subnets": [{"type": "dhcp"}],
+ }
+ ]
def test_should_remove_safely(self):
- mcfg = {'config': self.basecfg, 'version': 1}
- self.assertEqual(mcfg, _maybe_remove_top_network({'network': mcfg}))
+ mcfg = {"config": self.basecfg, "version": 1}
+ self.assertEqual(mcfg, _maybe_remove_top_network({"network": mcfg}))
def test_no_remove_if_other_keys(self):
"""should not shift if other keys at top level."""
- mcfg = {'network': {'config': self.basecfg, 'version': 1},
- 'unknown_keyname': 'keyval'}
+ mcfg = {
+ "network": {"config": self.basecfg, "version": 1},
+ "unknown_keyname": "keyval",
+ }
self.assertEqual(mcfg, _maybe_remove_top_network(mcfg))
def test_no_remove_if_non_dict(self):
"""should not shift if not a dict."""
- mcfg = {'network': '"content here'}
+ mcfg = {"network": '"content here'}
self.assertEqual(mcfg, _maybe_remove_top_network(mcfg))
def test_no_remove_if_missing_config_or_version(self):
"""should not shift unless network entry has config and version."""
- mcfg = {'network': {'config': self.basecfg}}
+ mcfg = {"network": {"config": self.basecfg}}
self.assertEqual(mcfg, _maybe_remove_top_network(mcfg))
- mcfg = {'network': {'version': 1}}
+ mcfg = {"network": {"version": 1}}
self.assertEqual(mcfg, _maybe_remove_top_network(mcfg))
def test_remove_with_config_disabled(self):
"""network/config=disabled should be shifted."""
- mcfg = {'config': 'disabled'}
- self.assertEqual(mcfg, _maybe_remove_top_network({'network': mcfg}))
+ mcfg = {"config": "disabled"}
+ self.assertEqual(mcfg, _maybe_remove_top_network({"network": mcfg}))
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/sources/test_opennebula.py
index 9c6070a5..e05c4749 100644
--- a/tests/unittests/test_datasource/test_opennebula.py
+++ b/tests/unittests/sources/test_opennebula.py
@@ -1,62 +1,61 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import helpers
-from cloudinit.sources import DataSourceOpenNebula as ds
-from cloudinit import util
-from cloudinit.tests.helpers import mock, populate_dir, CiTestCase
-
import os
import pwd
import unittest
import pytest
+from cloudinit import helpers, util
+from cloudinit.sources import DataSourceOpenNebula as ds
+from tests.unittests.helpers import CiTestCase, mock, populate_dir
TEST_VARS = {
- 'VAR1': 'single',
- 'VAR2': 'double word',
- 'VAR3': 'multi\nline\n',
- 'VAR4': "'single'",
- 'VAR5': "'double word'",
- 'VAR6': "'multi\nline\n'",
- 'VAR7': 'single\\t',
- 'VAR8': 'double\\tword',
- 'VAR9': 'multi\\t\nline\n',
- 'VAR10': '\\', # expect '\'
- 'VAR11': '\'', # expect '
- 'VAR12': '$', # expect $
+ "VAR1": "single",
+ "VAR2": "double word",
+ "VAR3": "multi\nline\n",
+ "VAR4": "'single'",
+ "VAR5": "'double word'",
+ "VAR6": "'multi\nline\n'",
+ "VAR7": "single\\t",
+ "VAR8": "double\\tword",
+ "VAR9": "multi\\t\nline\n",
+ "VAR10": "\\", # expect '\'
+ "VAR11": "'", # expect '
+ "VAR12": "$", # expect $
}
-INVALID_CONTEXT = ';'
-USER_DATA = '#cloud-config\napt_upgrade: true'
-SSH_KEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460-%i'
-HOSTNAME = 'foo.example.com'
-PUBLIC_IP = '10.0.0.3'
-MACADDR = '02:00:0a:12:01:01'
-IP_BY_MACADDR = '10.18.1.1'
-IP4_PREFIX = '24'
-IP6_GLOBAL = '2001:db8:1:0:400:c0ff:fea8:1ba'
-IP6_ULA = 'fd01:dead:beaf:0:400:c0ff:fea8:1ba'
-IP6_GW = '2001:db8:1::ffff'
-IP6_PREFIX = '48'
+INVALID_CONTEXT = ";"
+USER_DATA = "#cloud-config\napt_upgrade: true"
+SSH_KEY = "ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460-%i"
+HOSTNAME = "foo.example.com"
+PUBLIC_IP = "10.0.0.3"
+MACADDR = "02:00:0a:12:01:01"
+IP_BY_MACADDR = "10.18.1.1"
+IP4_PREFIX = "24"
+IP6_GLOBAL = "2001:db8:1:0:400:c0ff:fea8:1ba"
+IP6_ULA = "fd01:dead:beaf:0:400:c0ff:fea8:1ba"
+IP6_GW = "2001:db8:1::ffff"
+IP6_PREFIX = "48"
DS_PATH = "cloudinit.sources.DataSourceOpenNebula"
class TestOpenNebulaDataSource(CiTestCase):
parsed_user = None
- allowed_subp = ['bash']
+ allowed_subp = ["bash"]
def setUp(self):
super(TestOpenNebulaDataSource, self).setUp()
self.tmp = self.tmp_dir()
self.paths = helpers.Paths(
- {'cloud_dir': self.tmp, 'run_dir': self.tmp})
+ {"cloud_dir": self.tmp, "run_dir": self.tmp}
+ )
# defaults for few tests
self.ds = ds.DataSourceOpenNebula
self.seed_dir = os.path.join(self.paths.seed_dir, "opennebula")
- self.sys_cfg = {'datasource': {'OpenNebula': {'dsmode': 'local'}}}
+ self.sys_cfg = {"datasource": {"OpenNebula": {"dsmode": "local"}}}
# we don't want 'sudo' called in tests, so we patch switch_user_cmd
def my_switch_user_cmd(user):
@@ -86,7 +85,7 @@ class TestOpenNebulaDataSource(CiTestCase):
try:
# don't try to look up CDs
util.find_devs_with = lambda n: []
- populate_dir(self.seed_dir, {'context.sh': INVALID_CONTEXT})
+ populate_dir(self.seed_dir, {"context.sh": INVALID_CONTEXT})
dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths)
self.assertRaises(ds.BrokenContextDiskDir, dsrc.get_data)
finally:
@@ -97,18 +96,19 @@ class TestOpenNebulaDataSource(CiTestCase):
try:
# generate non-existing system user name
sys_cfg = self.sys_cfg
- invalid_user = 'invalid'
- while not sys_cfg['datasource']['OpenNebula'].get('parseuser'):
+ invalid_user = "invalid"
+ while not sys_cfg["datasource"]["OpenNebula"].get("parseuser"):
try:
pwd.getpwnam(invalid_user)
- invalid_user += 'X'
+ invalid_user += "X"
except KeyError:
- sys_cfg['datasource']['OpenNebula']['parseuser'] = \
- invalid_user
+ sys_cfg["datasource"]["OpenNebula"][
+ "parseuser"
+ ] = invalid_user
# don't try to look up CDs
util.find_devs_with = lambda n: []
- populate_context_dir(self.seed_dir, {'KEY1': 'val1'})
+ populate_context_dir(self.seed_dir, {"KEY1": "val1"})
dsrc = self.ds(sys_cfg=sys_cfg, distro=None, paths=self.paths)
self.assertRaises(ds.BrokenContextDiskDir, dsrc.get_data)
finally:
@@ -119,226 +119,265 @@ class TestOpenNebulaDataSource(CiTestCase):
try:
# don't try to look up CDs
util.find_devs_with = lambda n: []
- populate_context_dir(self.seed_dir, {'KEY1': 'val1'})
+ populate_context_dir(self.seed_dir, {"KEY1": "val1"})
dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
self.assertTrue(ret)
finally:
util.find_devs_with = orig_find_devs_with
- self.assertEqual('opennebula', dsrc.cloud_name)
- self.assertEqual('opennebula', dsrc.platform_type)
+ self.assertEqual("opennebula", dsrc.cloud_name)
+ self.assertEqual("opennebula", dsrc.platform_type)
self.assertEqual(
- 'seed-dir (%s/seed/opennebula)' % self.tmp, dsrc.subplatform)
+ "seed-dir (%s/seed/opennebula)" % self.tmp, dsrc.subplatform
+ )
def test_seed_dir_non_contextdisk(self):
- self.assertRaises(ds.NonContextDiskDir, ds.read_context_disk_dir,
- self.seed_dir, mock.Mock())
+ self.assertRaises(
+ ds.NonContextDiskDir,
+ ds.read_context_disk_dir,
+ self.seed_dir,
+ mock.Mock(),
+ )
def test_seed_dir_empty1_context(self):
- populate_dir(self.seed_dir, {'context.sh': ''})
+ populate_dir(self.seed_dir, {"context.sh": ""})
results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
- self.assertIsNone(results['userdata'])
- self.assertEqual(results['metadata'], {})
+ self.assertIsNone(results["userdata"])
+ self.assertEqual(results["metadata"], {})
def test_seed_dir_empty2_context(self):
populate_context_dir(self.seed_dir, {})
results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
- self.assertIsNone(results['userdata'])
- self.assertEqual(results['metadata'], {})
+ self.assertIsNone(results["userdata"])
+ self.assertEqual(results["metadata"], {})
def test_seed_dir_broken_context(self):
- populate_dir(self.seed_dir, {'context.sh': INVALID_CONTEXT})
+ populate_dir(self.seed_dir, {"context.sh": INVALID_CONTEXT})
- self.assertRaises(ds.BrokenContextDiskDir,
- ds.read_context_disk_dir,
- self.seed_dir, mock.Mock())
+ self.assertRaises(
+ ds.BrokenContextDiskDir,
+ ds.read_context_disk_dir,
+ self.seed_dir,
+ mock.Mock(),
+ )
def test_context_parser(self):
populate_context_dir(self.seed_dir, TEST_VARS)
results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
- self.assertTrue('metadata' in results)
- self.assertEqual(TEST_VARS, results['metadata'])
+ self.assertTrue("metadata" in results)
+ self.assertEqual(TEST_VARS, results["metadata"])
def test_ssh_key(self):
- public_keys = ['first key', 'second key']
+ public_keys = ["first key", "second key"]
for c in range(4):
- for k in ('SSH_KEY', 'SSH_PUBLIC_KEY'):
+ for k in ("SSH_KEY", "SSH_PUBLIC_KEY"):
my_d = os.path.join(self.tmp, "%s-%i" % (k, c))
- populate_context_dir(my_d, {k: '\n'.join(public_keys)})
+ populate_context_dir(my_d, {k: "\n".join(public_keys)})
results = ds.read_context_disk_dir(my_d, mock.Mock())
- self.assertTrue('metadata' in results)
- self.assertTrue('public-keys' in results['metadata'])
- self.assertEqual(public_keys,
- results['metadata']['public-keys'])
+ self.assertTrue("metadata" in results)
+ self.assertTrue("public-keys" in results["metadata"])
+ self.assertEqual(
+ public_keys, results["metadata"]["public-keys"]
+ )
public_keys.append(SSH_KEY % (c + 1,))
def test_user_data_plain(self):
- for k in ('USER_DATA', 'USERDATA'):
+ for k in ("USER_DATA", "USERDATA"):
my_d = os.path.join(self.tmp, k)
- populate_context_dir(my_d, {k: USER_DATA,
- 'USERDATA_ENCODING': ''})
+ populate_context_dir(my_d, {k: USER_DATA, "USERDATA_ENCODING": ""})
results = ds.read_context_disk_dir(my_d, mock.Mock())
- self.assertTrue('userdata' in results)
- self.assertEqual(USER_DATA, results['userdata'])
+ self.assertTrue("userdata" in results)
+ self.assertEqual(USER_DATA, results["userdata"])
def test_user_data_encoding_required_for_decode(self):
b64userdata = util.b64e(USER_DATA)
- for k in ('USER_DATA', 'USERDATA'):
+ for k in ("USER_DATA", "USERDATA"):
my_d = os.path.join(self.tmp, k)
populate_context_dir(my_d, {k: b64userdata})
results = ds.read_context_disk_dir(my_d, mock.Mock())
- self.assertTrue('userdata' in results)
- self.assertEqual(b64userdata, results['userdata'])
+ self.assertTrue("userdata" in results)
+ self.assertEqual(b64userdata, results["userdata"])
def test_user_data_base64_encoding(self):
- for k in ('USER_DATA', 'USERDATA'):
+ for k in ("USER_DATA", "USERDATA"):
my_d = os.path.join(self.tmp, k)
- populate_context_dir(my_d, {k: util.b64e(USER_DATA),
- 'USERDATA_ENCODING': 'base64'})
+ populate_context_dir(
+ my_d, {k: util.b64e(USER_DATA), "USERDATA_ENCODING": "base64"}
+ )
results = ds.read_context_disk_dir(my_d, mock.Mock())
- self.assertTrue('userdata' in results)
- self.assertEqual(USER_DATA, results['userdata'])
+ self.assertTrue("userdata" in results)
+ self.assertEqual(USER_DATA, results["userdata"])
@mock.patch(DS_PATH + ".get_physical_nics_by_mac")
def test_hostname(self, m_get_phys_by_mac):
- for dev in ('eth0', 'ens3'):
+ for dev in ("eth0", "ens3"):
m_get_phys_by_mac.return_value = {MACADDR: dev}
- for k in ('HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', 'ETH0_IP'):
+ for k in (
+ "SET_HOSTNAME",
+ "HOSTNAME",
+ "PUBLIC_IP",
+ "IP_PUBLIC",
+ "ETH0_IP",
+ ):
my_d = os.path.join(self.tmp, k)
populate_context_dir(my_d, {k: PUBLIC_IP})
results = ds.read_context_disk_dir(my_d, mock.Mock())
- self.assertTrue('metadata' in results)
- self.assertTrue('local-hostname' in results['metadata'])
+ self.assertTrue("metadata" in results)
+ self.assertTrue("local-hostname" in results["metadata"])
self.assertEqual(
- PUBLIC_IP, results['metadata']['local-hostname'])
+ PUBLIC_IP, results["metadata"]["local-hostname"]
+ )
@mock.patch(DS_PATH + ".get_physical_nics_by_mac")
def test_network_interfaces(self, m_get_phys_by_mac):
- for dev in ('eth0', 'ens3'):
+ for dev in ("eth0", "ens3"):
m_get_phys_by_mac.return_value = {MACADDR: dev}
# without ETH0_MAC
# for Older OpenNebula?
- populate_context_dir(self.seed_dir, {'ETH0_IP': IP_BY_MACADDR})
+ populate_context_dir(self.seed_dir, {"ETH0_IP": IP_BY_MACADDR})
results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
- self.assertTrue('network-interfaces' in results)
+ self.assertTrue("network-interfaces" in results)
self.assertTrue(
- IP_BY_MACADDR + '/' + IP4_PREFIX in
- results['network-interfaces']['ethernets'][dev]['addresses'])
+ IP_BY_MACADDR + "/" + IP4_PREFIX
+ in results["network-interfaces"]["ethernets"][dev]["addresses"]
+ )
# ETH0_IP and ETH0_MAC
populate_context_dir(
- self.seed_dir, {'ETH0_IP': IP_BY_MACADDR, 'ETH0_MAC': MACADDR})
+ self.seed_dir, {"ETH0_IP": IP_BY_MACADDR, "ETH0_MAC": MACADDR}
+ )
results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
- self.assertTrue('network-interfaces' in results)
+ self.assertTrue("network-interfaces" in results)
self.assertTrue(
- IP_BY_MACADDR + '/' + IP4_PREFIX in
- results['network-interfaces']['ethernets'][dev]['addresses'])
+ IP_BY_MACADDR + "/" + IP4_PREFIX
+ in results["network-interfaces"]["ethernets"][dev]["addresses"]
+ )
# ETH0_IP with empty string and ETH0_MAC
# in the case of using a Virtual Network that contains
# "AR = [ TYPE = ETHER ]"
populate_context_dir(
- self.seed_dir, {'ETH0_IP': '', 'ETH0_MAC': MACADDR})
+ self.seed_dir, {"ETH0_IP": "", "ETH0_MAC": MACADDR}
+ )
results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
- self.assertTrue('network-interfaces' in results)
+ self.assertTrue("network-interfaces" in results)
self.assertTrue(
- IP_BY_MACADDR + '/' + IP4_PREFIX in
- results['network-interfaces']['ethernets'][dev]['addresses'])
+ IP_BY_MACADDR + "/" + IP4_PREFIX
+ in results["network-interfaces"]["ethernets"][dev]["addresses"]
+ )
# ETH0_MASK
populate_context_dir(
- self.seed_dir, {
- 'ETH0_IP': IP_BY_MACADDR,
- 'ETH0_MAC': MACADDR,
- 'ETH0_MASK': '255.255.0.0'
- })
+ self.seed_dir,
+ {
+ "ETH0_IP": IP_BY_MACADDR,
+ "ETH0_MAC": MACADDR,
+ "ETH0_MASK": "255.255.0.0",
+ },
+ )
results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
- self.assertTrue('network-interfaces' in results)
+ self.assertTrue("network-interfaces" in results)
self.assertTrue(
- IP_BY_MACADDR + '/16' in
- results['network-interfaces']['ethernets'][dev]['addresses'])
+ IP_BY_MACADDR + "/16"
+ in results["network-interfaces"]["ethernets"][dev]["addresses"]
+ )
# ETH0_MASK with empty string
populate_context_dir(
- self.seed_dir, {
- 'ETH0_IP': IP_BY_MACADDR,
- 'ETH0_MAC': MACADDR,
- 'ETH0_MASK': ''
- })
+ self.seed_dir,
+ {
+ "ETH0_IP": IP_BY_MACADDR,
+ "ETH0_MAC": MACADDR,
+ "ETH0_MASK": "",
+ },
+ )
results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
- self.assertTrue('network-interfaces' in results)
+ self.assertTrue("network-interfaces" in results)
self.assertTrue(
- IP_BY_MACADDR + '/' + IP4_PREFIX in
- results['network-interfaces']['ethernets'][dev]['addresses'])
+ IP_BY_MACADDR + "/" + IP4_PREFIX
+ in results["network-interfaces"]["ethernets"][dev]["addresses"]
+ )
# ETH0_IP6
populate_context_dir(
- self.seed_dir, {
- 'ETH0_IP6': IP6_GLOBAL,
- 'ETH0_MAC': MACADDR,
- })
+ self.seed_dir,
+ {
+ "ETH0_IP6": IP6_GLOBAL,
+ "ETH0_MAC": MACADDR,
+ },
+ )
results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
- self.assertTrue('network-interfaces' in results)
+ self.assertTrue("network-interfaces" in results)
self.assertTrue(
- IP6_GLOBAL + '/64' in
- results['network-interfaces']['ethernets'][dev]['addresses'])
+ IP6_GLOBAL + "/64"
+ in results["network-interfaces"]["ethernets"][dev]["addresses"]
+ )
# ETH0_IP6_ULA
populate_context_dir(
- self.seed_dir, {
- 'ETH0_IP6_ULA': IP6_ULA,
- 'ETH0_MAC': MACADDR,
- })
+ self.seed_dir,
+ {
+ "ETH0_IP6_ULA": IP6_ULA,
+ "ETH0_MAC": MACADDR,
+ },
+ )
results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
- self.assertTrue('network-interfaces' in results)
+ self.assertTrue("network-interfaces" in results)
self.assertTrue(
- IP6_ULA + '/64' in
- results['network-interfaces']['ethernets'][dev]['addresses'])
+ IP6_ULA + "/64"
+ in results["network-interfaces"]["ethernets"][dev]["addresses"]
+ )
# ETH0_IP6 and ETH0_IP6_PREFIX_LENGTH
populate_context_dir(
- self.seed_dir, {
- 'ETH0_IP6': IP6_GLOBAL,
- 'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX,
- 'ETH0_MAC': MACADDR,
- })
+ self.seed_dir,
+ {
+ "ETH0_IP6": IP6_GLOBAL,
+ "ETH0_IP6_PREFIX_LENGTH": IP6_PREFIX,
+ "ETH0_MAC": MACADDR,
+ },
+ )
results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
- self.assertTrue('network-interfaces' in results)
+ self.assertTrue("network-interfaces" in results)
self.assertTrue(
- IP6_GLOBAL + '/' + IP6_PREFIX in
- results['network-interfaces']['ethernets'][dev]['addresses'])
+ IP6_GLOBAL + "/" + IP6_PREFIX
+ in results["network-interfaces"]["ethernets"][dev]["addresses"]
+ )
# ETH0_IP6 and ETH0_IP6_PREFIX_LENGTH with empty string
populate_context_dir(
- self.seed_dir, {
- 'ETH0_IP6': IP6_GLOBAL,
- 'ETH0_IP6_PREFIX_LENGTH': '',
- 'ETH0_MAC': MACADDR,
- })
+ self.seed_dir,
+ {
+ "ETH0_IP6": IP6_GLOBAL,
+ "ETH0_IP6_PREFIX_LENGTH": "",
+ "ETH0_MAC": MACADDR,
+ },
+ )
results = ds.read_context_disk_dir(self.seed_dir, mock.Mock())
- self.assertTrue('network-interfaces' in results)
+ self.assertTrue("network-interfaces" in results)
self.assertTrue(
- IP6_GLOBAL + '/64' in
- results['network-interfaces']['ethernets'][dev]['addresses'])
+ IP6_GLOBAL + "/64"
+ in results["network-interfaces"]["ethernets"][dev]["addresses"]
+ )
def test_find_candidates(self):
def my_devs_with(criteria):
@@ -351,25 +390,28 @@ class TestOpenNebulaDataSource(CiTestCase):
orig_find_devs_with = util.find_devs_with
try:
util.find_devs_with = my_devs_with
- self.assertEqual(["/dev/sdb", "/dev/sr0", "/dev/vdb"],
- ds.find_candidate_devs())
+ self.assertEqual(
+ ["/dev/sdb", "/dev/sr0", "/dev/vdb"], ds.find_candidate_devs()
+ )
finally:
util.find_devs_with = orig_find_devs_with
-@mock.patch(DS_PATH + '.net.get_interfaces_by_mac', mock.Mock(return_value={}))
+@mock.patch(DS_PATH + ".net.get_interfaces_by_mac", mock.Mock(return_value={}))
class TestOpenNebulaNetwork(unittest.TestCase):
- system_nics = ('eth0', 'ens3')
+ system_nics = ("eth0", "ens3")
def test_context_devname(self):
"""Verify context_devname correctly returns mac and name."""
context = {
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'ETH1_MAC': '02:00:0a:12:0f:0f', }
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH1_MAC": "02:00:0a:12:0f:0f",
+ }
expected = {
- '02:00:0a:12:01:01': 'ETH0',
- '02:00:0a:12:0f:0f': 'ETH1', }
+ "02:00:0a:12:01:01": "ETH0",
+ "02:00:0a:12:0f:0f": "ETH1",
+ }
net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(expected, net.context_devname)
@@ -379,28 +421,30 @@ class TestOpenNebulaNetwork(unittest.TestCase):
and search domains.
"""
context = {
- 'DNS': '1.2.3.8',
- 'ETH0_DNS': '1.2.3.6 1.2.3.7',
- 'ETH0_SEARCH_DOMAIN': 'example.com example.org', }
+ "DNS": "1.2.3.8",
+ "ETH0_DNS": "1.2.3.6 1.2.3.7",
+ "ETH0_SEARCH_DOMAIN": "example.com example.org",
+ }
expected = {
- 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'],
- 'search': ['example.com', 'example.org']}
+ "addresses": ["1.2.3.6", "1.2.3.7", "1.2.3.8"],
+ "search": ["example.com", "example.org"],
+ }
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_nameservers('eth0')
+ val = net.get_nameservers("eth0")
self.assertEqual(expected, val)
def test_get_mtu(self):
"""Verify get_mtu('device') correctly returns MTU size."""
- context = {'ETH0_MTU': '1280'}
+ context = {"ETH0_MTU": "1280"}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_mtu('eth0')
- self.assertEqual('1280', val)
+ val = net.get_mtu("eth0")
+ self.assertEqual("1280", val)
def test_get_ip(self):
"""Verify get_ip('device') correctly returns IPv4 address."""
- context = {'ETH0_IP': PUBLIC_IP}
+ context = {"ETH0_IP": PUBLIC_IP}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_ip('eth0', MACADDR)
+ val = net.get_ip("eth0", MACADDR)
self.assertEqual(PUBLIC_IP, val)
def test_get_ip_emptystring(self):
@@ -409,9 +453,9 @@ class TestOpenNebulaNetwork(unittest.TestCase):
It returns IP address created by MAC address if ETH0_IP has empty
string.
"""
- context = {'ETH0_IP': ''}
+ context = {"ETH0_IP": ""}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_ip('eth0', MACADDR)
+ val = net.get_ip("eth0", MACADDR)
self.assertEqual(IP_BY_MACADDR, val)
def test_get_ip6(self):
@@ -420,11 +464,12 @@ class TestOpenNebulaNetwork(unittest.TestCase):
In this case, IPv6 address is Given by ETH0_IP6.
"""
context = {
- 'ETH0_IP6': IP6_GLOBAL,
- 'ETH0_IP6_ULA': '', }
+ "ETH0_IP6": IP6_GLOBAL,
+ "ETH0_IP6_ULA": "",
+ }
expected = [IP6_GLOBAL]
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_ip6('eth0')
+ val = net.get_ip6("eth0")
self.assertEqual(expected, val)
def test_get_ip6_ula(self):
@@ -433,11 +478,12 @@ class TestOpenNebulaNetwork(unittest.TestCase):
In this case, IPv6 address is Given by ETH0_IP6_ULA.
"""
context = {
- 'ETH0_IP6': '',
- 'ETH0_IP6_ULA': IP6_ULA, }
+ "ETH0_IP6": "",
+ "ETH0_IP6_ULA": IP6_ULA,
+ }
expected = [IP6_ULA]
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_ip6('eth0')
+ val = net.get_ip6("eth0")
self.assertEqual(expected, val)
def test_get_ip6_dual(self):
@@ -446,20 +492,21 @@ class TestOpenNebulaNetwork(unittest.TestCase):
In this case, IPv6 addresses are Given by ETH0_IP6 and ETH0_IP6_ULA.
"""
context = {
- 'ETH0_IP6': IP6_GLOBAL,
- 'ETH0_IP6_ULA': IP6_ULA, }
+ "ETH0_IP6": IP6_GLOBAL,
+ "ETH0_IP6_ULA": IP6_ULA,
+ }
expected = [IP6_GLOBAL, IP6_ULA]
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_ip6('eth0')
+ val = net.get_ip6("eth0")
self.assertEqual(expected, val)
def test_get_ip6_prefix(self):
"""
Verify get_ip6_prefix('device') correctly returns IPv6 prefix.
"""
- context = {'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX}
+ context = {"ETH0_IP6_PREFIX_LENGTH": IP6_PREFIX}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_ip6_prefix('eth0')
+ val = net.get_ip6_prefix("eth0")
self.assertEqual(IP6_PREFIX, val)
def test_get_ip6_prefix_emptystring(self):
@@ -468,58 +515,59 @@ class TestOpenNebulaNetwork(unittest.TestCase):
It returns default value '64' if ETH0_IP6_PREFIX_LENGTH has empty
string.
"""
- context = {'ETH0_IP6_PREFIX_LENGTH': ''}
+ context = {"ETH0_IP6_PREFIX_LENGTH": ""}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_ip6_prefix('eth0')
- self.assertEqual('64', val)
+ val = net.get_ip6_prefix("eth0")
+ self.assertEqual("64", val)
def test_get_gateway(self):
"""
Verify get_gateway('device') correctly returns IPv4 default gateway
address.
"""
- context = {'ETH0_GATEWAY': '1.2.3.5'}
+ context = {"ETH0_GATEWAY": "1.2.3.5"}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_gateway('eth0')
- self.assertEqual('1.2.3.5', val)
+ val = net.get_gateway("eth0")
+ self.assertEqual("1.2.3.5", val)
def test_get_gateway6(self):
"""
Verify get_gateway6('device') correctly returns IPv6 default gateway
address.
"""
- context = {'ETH0_GATEWAY6': IP6_GW}
- net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_gateway6('eth0')
- self.assertEqual(IP6_GW, val)
+ for k in ("GATEWAY6", "IP6_GATEWAY"):
+ context = {"ETH0_" + k: IP6_GW}
+ net = ds.OpenNebulaNetwork(context, mock.Mock())
+ val = net.get_gateway6("eth0")
+ self.assertEqual(IP6_GW, val)
def test_get_mask(self):
"""
Verify get_mask('device') correctly returns IPv4 subnet mask.
"""
- context = {'ETH0_MASK': '255.255.0.0'}
+ context = {"ETH0_MASK": "255.255.0.0"}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_mask('eth0')
- self.assertEqual('255.255.0.0', val)
+ val = net.get_mask("eth0")
+ self.assertEqual("255.255.0.0", val)
def test_get_mask_emptystring(self):
"""
Verify get_mask('device') correctly returns IPv4 subnet mask.
It returns default value '255.255.255.0' if ETH0_MASK has empty string.
"""
- context = {'ETH0_MASK': ''}
+ context = {"ETH0_MASK": ""}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_mask('eth0')
- self.assertEqual('255.255.255.0', val)
+ val = net.get_mask("eth0")
+ self.assertEqual("255.255.255.0", val)
def test_get_network(self):
"""
Verify get_network('device') correctly returns IPv4 network address.
"""
- context = {'ETH0_NETWORK': '1.2.3.0'}
+ context = {"ETH0_NETWORK": "1.2.3.0"}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_network('eth0', MACADDR)
- self.assertEqual('1.2.3.0', val)
+ val = net.get_network("eth0", MACADDR)
+ self.assertEqual("1.2.3.0", val)
def test_get_network_emptystring(self):
"""
@@ -527,48 +575,48 @@ class TestOpenNebulaNetwork(unittest.TestCase):
It returns network address created by MAC address if ETH0_NETWORK has
empty string.
"""
- context = {'ETH0_NETWORK': ''}
+ context = {"ETH0_NETWORK": ""}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_network('eth0', MACADDR)
- self.assertEqual('10.18.1.0', val)
+ val = net.get_network("eth0", MACADDR)
+ self.assertEqual("10.18.1.0", val)
def test_get_field(self):
"""
Verify get_field('device', 'name') returns *context* value.
"""
- context = {'ETH9_DUMMY': 'DUMMY_VALUE'}
+ context = {"ETH9_DUMMY": "DUMMY_VALUE"}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_field('eth9', 'dummy')
- self.assertEqual('DUMMY_VALUE', val)
+ val = net.get_field("eth9", "dummy")
+ self.assertEqual("DUMMY_VALUE", val)
def test_get_field_withdefaultvalue(self):
"""
Verify get_field('device', 'name', 'default value') returns *context*
value.
"""
- context = {'ETH9_DUMMY': 'DUMMY_VALUE'}
+ context = {"ETH9_DUMMY": "DUMMY_VALUE"}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_field('eth9', 'dummy', 'DEFAULT_VALUE')
- self.assertEqual('DUMMY_VALUE', val)
+ val = net.get_field("eth9", "dummy", "DEFAULT_VALUE")
+ self.assertEqual("DUMMY_VALUE", val)
def test_get_field_withdefaultvalue_emptycontext(self):
"""
Verify get_field('device', 'name', 'default value') returns *default*
value if context value is empty string.
"""
- context = {'ETH9_DUMMY': ''}
+ context = {"ETH9_DUMMY": ""}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_field('eth9', 'dummy', 'DEFAULT_VALUE')
- self.assertEqual('DEFAULT_VALUE', val)
+ val = net.get_field("eth9", "dummy", "DEFAULT_VALUE")
+ self.assertEqual("DEFAULT_VALUE", val)
def test_get_field_emptycontext(self):
"""
Verify get_field('device', 'name') returns None if context value is
empty string.
"""
- context = {'ETH9_DUMMY': ''}
+ context = {"ETH9_DUMMY": ""}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_field('eth9', 'dummy')
+ val = net.get_field("eth9", "dummy")
self.assertEqual(None, val)
def test_get_field_nonecontext(self):
@@ -576,9 +624,9 @@ class TestOpenNebulaNetwork(unittest.TestCase):
Verify get_field('device', 'name') returns None if context value is
None.
"""
- context = {'ETH9_DUMMY': None}
+ context = {"ETH9_DUMMY": None}
net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_field('eth9', 'dummy')
+ val = net.get_field("eth9", "dummy")
self.assertEqual(None, val)
@mock.patch(DS_PATH + ".get_physical_nics_by_mac")
@@ -587,31 +635,39 @@ class TestOpenNebulaNetwork(unittest.TestCase):
self.maxDiff = None
# empty ETH0_GATEWAY
context = {
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'ETH0_GATEWAY': '', }
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_GATEWAY": "",
+ }
for nic in self.system_nics:
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'match': {'macaddress': MACADDR},
- 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
m_get_phys_by_mac.return_value = {MACADDR: nic}
net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
# set ETH0_GATEWAY
context = {
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'ETH0_GATEWAY': '1.2.3.5', }
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_GATEWAY": "1.2.3.5",
+ }
for nic in self.system_nics:
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'gateway4': '1.2.3.5',
- 'match': {'macaddress': MACADDR},
- 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ "gateway4": "1.2.3.5",
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
m_get_phys_by_mac.return_value = {MACADDR: nic}
net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
@@ -622,31 +678,39 @@ class TestOpenNebulaNetwork(unittest.TestCase):
self.maxDiff = None
# empty ETH0_GATEWAY6
context = {
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'ETH0_GATEWAY6': '', }
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_GATEWAY6": "",
+ }
for nic in self.system_nics:
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'match': {'macaddress': MACADDR},
- 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
m_get_phys_by_mac.return_value = {MACADDR: nic}
net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
# set ETH0_GATEWAY6
context = {
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'ETH0_GATEWAY6': IP6_GW, }
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_GATEWAY6": IP6_GW,
+ }
for nic in self.system_nics:
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'gateway6': IP6_GW,
- 'match': {'macaddress': MACADDR},
- 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ "gateway6": IP6_GW,
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
m_get_phys_by_mac.return_value = {MACADDR: nic}
net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
@@ -657,37 +721,46 @@ class TestOpenNebulaNetwork(unittest.TestCase):
self.maxDiff = None
# empty ETH0_IP6, ETH0_IP6_ULA, ETH0_IP6_PREFIX_LENGTH
context = {
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'ETH0_IP6': '',
- 'ETH0_IP6_ULA': '',
- 'ETH0_IP6_PREFIX_LENGTH': '', }
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_IP6": "",
+ "ETH0_IP6_ULA": "",
+ "ETH0_IP6_PREFIX_LENGTH": "",
+ }
for nic in self.system_nics:
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'match': {'macaddress': MACADDR},
- 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
m_get_phys_by_mac.return_value = {MACADDR: nic}
net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
# set ETH0_IP6, ETH0_IP6_ULA, ETH0_IP6_PREFIX_LENGTH
context = {
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'ETH0_IP6': IP6_GLOBAL,
- 'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX,
- 'ETH0_IP6_ULA': IP6_ULA, }
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_IP6": IP6_GLOBAL,
+ "ETH0_IP6_PREFIX_LENGTH": IP6_PREFIX,
+ "ETH0_IP6_ULA": IP6_ULA,
+ }
for nic in self.system_nics:
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'match': {'macaddress': MACADDR},
- 'addresses': [
- IP_BY_MACADDR + '/' + IP4_PREFIX,
- IP6_GLOBAL + '/' + IP6_PREFIX,
- IP6_ULA + '/' + IP6_PREFIX]}}}
+ "match": {"macaddress": MACADDR},
+ "addresses": [
+ IP_BY_MACADDR + "/" + IP4_PREFIX,
+ IP6_GLOBAL + "/" + IP6_PREFIX,
+ IP6_ULA + "/" + IP6_PREFIX,
+ ],
+ }
+ },
+ }
m_get_phys_by_mac.return_value = {MACADDR: nic}
net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
@@ -698,37 +771,46 @@ class TestOpenNebulaNetwork(unittest.TestCase):
self.maxDiff = None
# empty DNS, ETH0_DNS, ETH0_SEARCH_DOMAIN
context = {
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'DNS': '',
- 'ETH0_DNS': '',
- 'ETH0_SEARCH_DOMAIN': '', }
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "DNS": "",
+ "ETH0_DNS": "",
+ "ETH0_SEARCH_DOMAIN": "",
+ }
for nic in self.system_nics:
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'match': {'macaddress': MACADDR},
- 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
m_get_phys_by_mac.return_value = {MACADDR: nic}
net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
# set DNS, ETH0_DNS, ETH0_SEARCH_DOMAIN
context = {
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'DNS': '1.2.3.8',
- 'ETH0_DNS': '1.2.3.6 1.2.3.7',
- 'ETH0_SEARCH_DOMAIN': 'example.com example.org', }
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "DNS": "1.2.3.8",
+ "ETH0_DNS": "1.2.3.6 1.2.3.7",
+ "ETH0_SEARCH_DOMAIN": "example.com example.org",
+ }
for nic in self.system_nics:
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'nameservers': {
- 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'],
- 'search': ['example.com', 'example.org']},
- 'match': {'macaddress': MACADDR},
- 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ "nameservers": {
+ "addresses": ["1.2.3.6", "1.2.3.7", "1.2.3.8"],
+ "search": ["example.com", "example.org"],
+ },
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
m_get_phys_by_mac.return_value = {MACADDR: nic}
net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
@@ -739,31 +821,39 @@ class TestOpenNebulaNetwork(unittest.TestCase):
self.maxDiff = None
# empty ETH0_MTU
context = {
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'ETH0_MTU': '', }
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_MTU": "",
+ }
for nic in self.system_nics:
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'match': {'macaddress': MACADDR},
- 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
m_get_phys_by_mac.return_value = {MACADDR: nic}
net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
# set ETH0_MTU
context = {
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'ETH0_MTU': '1280', }
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_MTU": "1280",
+ }
for nic in self.system_nics:
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'mtu': '1280',
- 'match': {'macaddress': MACADDR},
- 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ "mtu": "1280",
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
m_get_phys_by_mac.return_value = {MACADDR: nic}
net = ds.OpenNebulaNetwork(context, mock.Mock())
self.assertEqual(net.gen_conf(), expected)
@@ -774,11 +864,14 @@ class TestOpenNebulaNetwork(unittest.TestCase):
m_get_phys_by_mac.return_value = {MACADDR: nic}
net = ds.OpenNebulaNetwork({}, mock.Mock())
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'match': {'macaddress': MACADDR},
- 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}}
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX],
+ }
+ },
+ }
self.assertEqual(net.gen_conf(), expected)
@@ -793,71 +886,82 @@ class TestOpenNebulaNetwork(unittest.TestCase):
def test_eth0_override(self):
self.maxDiff = None
context = {
- 'DNS': '1.2.3.8',
- 'ETH0_DNS': '1.2.3.6 1.2.3.7',
- 'ETH0_GATEWAY': '1.2.3.5',
- 'ETH0_GATEWAY6': '',
- 'ETH0_IP': IP_BY_MACADDR,
- 'ETH0_IP6': '',
- 'ETH0_IP6_PREFIX_LENGTH': '',
- 'ETH0_IP6_ULA': '',
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'ETH0_MASK': '255.255.0.0',
- 'ETH0_MTU': '',
- 'ETH0_NETWORK': '10.18.0.0',
- 'ETH0_SEARCH_DOMAIN': '',
+ "DNS": "1.2.3.8",
+ "ETH0_DNS": "1.2.3.6 1.2.3.7",
+ "ETH0_GATEWAY": "1.2.3.5",
+ "ETH0_GATEWAY6": "",
+ "ETH0_IP": IP_BY_MACADDR,
+ "ETH0_IP6": "",
+ "ETH0_IP6_PREFIX_LENGTH": "",
+ "ETH0_IP6_ULA": "",
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_MASK": "255.255.0.0",
+ "ETH0_MTU": "",
+ "ETH0_NETWORK": "10.18.0.0",
+ "ETH0_SEARCH_DOMAIN": "",
}
for nic in self.system_nics:
- net = ds.OpenNebulaNetwork(context, mock.Mock(),
- system_nics_by_mac={MACADDR: nic})
+ net = ds.OpenNebulaNetwork(
+ context, mock.Mock(), system_nics_by_mac={MACADDR: nic}
+ )
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'match': {'macaddress': MACADDR},
- 'addresses': [IP_BY_MACADDR + '/16'],
- 'gateway4': '1.2.3.5',
- 'nameservers': {
- 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8']}}}}
+ "match": {"macaddress": MACADDR},
+ "addresses": [IP_BY_MACADDR + "/16"],
+ "gateway4": "1.2.3.5",
+ "nameservers": {
+ "addresses": ["1.2.3.6", "1.2.3.7", "1.2.3.8"]
+ },
+ }
+ },
+ }
self.assertEqual(expected, net.gen_conf())
def test_eth0_v4v6_override(self):
self.maxDiff = None
context = {
- 'DNS': '1.2.3.8',
- 'ETH0_DNS': '1.2.3.6 1.2.3.7',
- 'ETH0_GATEWAY': '1.2.3.5',
- 'ETH0_GATEWAY6': IP6_GW,
- 'ETH0_IP': IP_BY_MACADDR,
- 'ETH0_IP6': IP6_GLOBAL,
- 'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX,
- 'ETH0_IP6_ULA': IP6_ULA,
- 'ETH0_MAC': '02:00:0a:12:01:01',
- 'ETH0_MASK': '255.255.0.0',
- 'ETH0_MTU': '1280',
- 'ETH0_NETWORK': '10.18.0.0',
- 'ETH0_SEARCH_DOMAIN': 'example.com example.org',
+ "DNS": "1.2.3.8",
+ "ETH0_DNS": "1.2.3.6 1.2.3.7",
+ "ETH0_GATEWAY": "1.2.3.5",
+ "ETH0_GATEWAY6": IP6_GW,
+ "ETH0_IP": IP_BY_MACADDR,
+ "ETH0_IP6": IP6_GLOBAL,
+ "ETH0_IP6_PREFIX_LENGTH": IP6_PREFIX,
+ "ETH0_IP6_ULA": IP6_ULA,
+ "ETH0_MAC": "02:00:0a:12:01:01",
+ "ETH0_MASK": "255.255.0.0",
+ "ETH0_MTU": "1280",
+ "ETH0_NETWORK": "10.18.0.0",
+ "ETH0_SEARCH_DOMAIN": "example.com example.org",
}
for nic in self.system_nics:
- net = ds.OpenNebulaNetwork(context, mock.Mock(),
- system_nics_by_mac={MACADDR: nic})
+ net = ds.OpenNebulaNetwork(
+ context, mock.Mock(), system_nics_by_mac={MACADDR: nic}
+ )
expected = {
- 'version': 2,
- 'ethernets': {
+ "version": 2,
+ "ethernets": {
nic: {
- 'match': {'macaddress': MACADDR},
- 'addresses': [
- IP_BY_MACADDR + '/16',
- IP6_GLOBAL + '/' + IP6_PREFIX,
- IP6_ULA + '/' + IP6_PREFIX],
- 'gateway4': '1.2.3.5',
- 'gateway6': IP6_GW,
- 'nameservers': {
- 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'],
- 'search': ['example.com', 'example.org']},
- 'mtu': '1280'}}}
+ "match": {"macaddress": MACADDR},
+ "addresses": [
+ IP_BY_MACADDR + "/16",
+ IP6_GLOBAL + "/" + IP6_PREFIX,
+ IP6_ULA + "/" + IP6_PREFIX,
+ ],
+ "gateway4": "1.2.3.5",
+ "gateway6": IP6_GW,
+ "nameservers": {
+ "addresses": ["1.2.3.6", "1.2.3.7", "1.2.3.8"],
+ "search": ["example.com", "example.org"],
+ },
+ "mtu": "1280",
+ }
+ },
+ }
self.assertEqual(expected, net.gen_conf())
@@ -867,62 +971,67 @@ class TestOpenNebulaNetwork(unittest.TestCase):
MAC_1 = "02:00:0a:12:01:01"
MAC_2 = "02:00:0a:12:01:02"
context = {
- 'DNS': '1.2.3.8',
- 'ETH0_DNS': '1.2.3.6 1.2.3.7',
- 'ETH0_GATEWAY': '1.2.3.5',
- 'ETH0_GATEWAY6': IP6_GW,
- 'ETH0_IP': '10.18.1.1',
- 'ETH0_IP6': IP6_GLOBAL,
- 'ETH0_IP6_PREFIX_LENGTH': '',
- 'ETH0_IP6_ULA': IP6_ULA,
- 'ETH0_MAC': MAC_2,
- 'ETH0_MASK': '255.255.0.0',
- 'ETH0_MTU': '1280',
- 'ETH0_NETWORK': '10.18.0.0',
- 'ETH0_SEARCH_DOMAIN': 'example.com',
- 'ETH3_DNS': '10.3.1.2',
- 'ETH3_GATEWAY': '10.3.0.1',
- 'ETH3_GATEWAY6': '',
- 'ETH3_IP': '10.3.1.3',
- 'ETH3_IP6': '',
- 'ETH3_IP6_PREFIX_LENGTH': '',
- 'ETH3_IP6_ULA': '',
- 'ETH3_MAC': MAC_1,
- 'ETH3_MASK': '255.255.0.0',
- 'ETH3_MTU': '',
- 'ETH3_NETWORK': '10.3.0.0',
- 'ETH3_SEARCH_DOMAIN': 'third.example.com third.example.org',
+ "DNS": "1.2.3.8",
+ "ETH0_DNS": "1.2.3.6 1.2.3.7",
+ "ETH0_GATEWAY": "1.2.3.5",
+ "ETH0_GATEWAY6": IP6_GW,
+ "ETH0_IP": "10.18.1.1",
+ "ETH0_IP6": IP6_GLOBAL,
+ "ETH0_IP6_PREFIX_LENGTH": "",
+ "ETH0_IP6_ULA": IP6_ULA,
+ "ETH0_MAC": MAC_2,
+ "ETH0_MASK": "255.255.0.0",
+ "ETH0_MTU": "1280",
+ "ETH0_NETWORK": "10.18.0.0",
+ "ETH0_SEARCH_DOMAIN": "example.com",
+ "ETH3_DNS": "10.3.1.2",
+ "ETH3_GATEWAY": "10.3.0.1",
+ "ETH3_GATEWAY6": "",
+ "ETH3_IP": "10.3.1.3",
+ "ETH3_IP6": "",
+ "ETH3_IP6_PREFIX_LENGTH": "",
+ "ETH3_IP6_ULA": "",
+ "ETH3_MAC": MAC_1,
+ "ETH3_MASK": "255.255.0.0",
+ "ETH3_MTU": "",
+ "ETH3_NETWORK": "10.3.0.0",
+ "ETH3_SEARCH_DOMAIN": "third.example.com third.example.org",
}
net = ds.OpenNebulaNetwork(
context,
mock.Mock(),
- system_nics_by_mac={MAC_1: 'enp0s25', MAC_2: 'enp1s2'}
+ system_nics_by_mac={MAC_1: "enp0s25", MAC_2: "enp1s2"},
)
expected = {
- 'version': 2,
- 'ethernets': {
- 'enp1s2': {
- 'match': {'macaddress': MAC_2},
- 'addresses': [
- '10.18.1.1/16',
- IP6_GLOBAL + '/64',
- IP6_ULA + '/64'],
- 'gateway4': '1.2.3.5',
- 'gateway6': IP6_GW,
- 'nameservers': {
- 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'],
- 'search': ['example.com']},
- 'mtu': '1280'},
- 'enp0s25': {
- 'match': {'macaddress': MAC_1},
- 'addresses': ['10.3.1.3/16'],
- 'gateway4': '10.3.0.1',
- 'nameservers': {
- 'addresses': ['10.3.1.2', '1.2.3.8'],
- 'search': [
- 'third.example.com',
- 'third.example.org']}}}}
+ "version": 2,
+ "ethernets": {
+ "enp1s2": {
+ "match": {"macaddress": MAC_2},
+ "addresses": [
+ "10.18.1.1/16",
+ IP6_GLOBAL + "/64",
+ IP6_ULA + "/64",
+ ],
+ "gateway4": "1.2.3.5",
+ "gateway6": IP6_GW,
+ "nameservers": {
+ "addresses": ["1.2.3.6", "1.2.3.7", "1.2.3.8"],
+ "search": ["example.com"],
+ },
+ "mtu": "1280",
+ },
+ "enp0s25": {
+ "match": {"macaddress": MAC_1},
+ "addresses": ["10.3.1.3/16"],
+ "gateway4": "10.3.0.1",
+ "nameservers": {
+ "addresses": ["10.3.1.2", "1.2.3.8"],
+ "search": ["third.example.com", "third.example.org"],
+ },
+ },
+ },
+ }
self.assertEqual(expected, net.gen_conf())
@@ -930,7 +1039,7 @@ class TestOpenNebulaNetwork(unittest.TestCase):
class TestParseShellConfig:
@pytest.mark.allow_subp_for("bash")
def test_no_seconds(self):
- cfg = '\n'.join(["foo=bar", "SECONDS=2", "xx=foo"])
+ cfg = "\n".join(["foo=bar", "SECONDS=2", "xx=foo"])
# we could test 'sleep 2', but that would make the test run slower.
ret = ds.parse_shell_config(cfg)
assert ret == {"foo": "bar", "xx": "foo"}
@@ -969,7 +1078,8 @@ class TestGetPhysicalNicsByMac:
def populate_context_dir(path, variables):
data = "# Context variables generated by OpenNebula\n"
for k, v in variables.items():
- data += ("%s='%s'\n" % (k.upper(), v.replace(r"'", r"'\''")))
- populate_dir(path, {'context.sh': data})
+ data += "%s='%s'\n" % (k.upper(), v.replace(r"'", r"'\''"))
+ populate_dir(path, {"context.sh": data})
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_openstack.py b/tests/unittests/sources/test_openstack.py
new file mode 100644
index 00000000..c111bbcd
--- /dev/null
+++ b/tests/unittests/sources/test_openstack.py
@@ -0,0 +1,788 @@
+# Copyright (C) 2014 Yahoo! Inc.
+#
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import copy
+import json
+import re
+from io import StringIO
+from urllib.parse import urlparse
+
+import httpretty as hp
+
+from cloudinit import helpers, settings, util
+from cloudinit.sources import UNSET, BrokenMetadata
+from cloudinit.sources import DataSourceOpenStack as ds
+from cloudinit.sources import convert_vendordata
+from cloudinit.sources.helpers import openstack
+from tests.unittests import helpers as test_helpers
+
+BASE_URL = "http://169.254.169.254"
+PUBKEY = "ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n"
+EC2_META = {
+ "ami-id": "ami-00000001",
+ "ami-launch-index": "0",
+ "ami-manifest-path": "FIXME",
+ "hostname": "sm-foo-test.novalocal",
+ "instance-action": "none",
+ "instance-id": "i-00000001",
+ "instance-type": "m1.tiny",
+ "local-hostname": "sm-foo-test.novalocal",
+ "local-ipv4": "0.0.0.0",
+ "public-hostname": "sm-foo-test.novalocal",
+ "public-ipv4": "0.0.0.1",
+ "reservation-id": "r-iru5qm4m",
+}
+USER_DATA = b"#!/bin/sh\necho This is user data\n"
+VENDOR_DATA = {
+ "magic": "",
+}
+VENDOR_DATA2 = {"static": {}}
+OSTACK_META = {
+ "availability_zone": "nova",
+ "files": [
+ {"content_path": "/content/0000", "path": "/etc/foo.cfg"},
+ {"content_path": "/content/0001", "path": "/etc/bar/bar.cfg"},
+ ],
+ "hostname": "sm-foo-test.novalocal",
+ "meta": {"dsmode": "local", "my-meta": "my-value"},
+ "name": "sm-foo-test",
+ "public_keys": {"mykey": PUBKEY},
+ "uuid": "b0fa911b-69d4-4476-bbe2-1c92bff6535c",
+}
+CONTENT_0 = b"This is contents of /etc/foo.cfg\n"
+CONTENT_1 = b"# this is /etc/bar/bar.cfg\n"
+OS_FILES = {
+ "openstack/content/0000": CONTENT_0,
+ "openstack/content/0001": CONTENT_1,
+ "openstack/latest/meta_data.json": json.dumps(OSTACK_META),
+ "openstack/latest/network_data.json": json.dumps(
+ {"links": [], "networks": [], "services": []}
+ ),
+ "openstack/latest/user_data": USER_DATA,
+ "openstack/latest/vendor_data.json": json.dumps(VENDOR_DATA),
+ "openstack/latest/vendor_data2.json": json.dumps(VENDOR_DATA2),
+}
+EC2_FILES = {
+ "latest/user-data": USER_DATA,
+}
+EC2_VERSIONS = [
+ "latest",
+]
+
+MOCK_PATH = "cloudinit.sources.DataSourceOpenStack."
+
+
+# TODO _register_uris should leverage test_ec2.register_mock_metaserver.
+def _register_uris(version, ec2_files, ec2_meta, os_files):
+ """Registers a set of url patterns into httpretty that will mimic the
+ same data returned by the openstack metadata service (and ec2 service)."""
+
+ def match_ec2_url(uri, headers):
+ path = uri.path.strip("/")
+ if len(path) == 0:
+ return (200, headers, "\n".join(EC2_VERSIONS))
+ path = uri.path.lstrip("/")
+ if path in ec2_files:
+ return (200, headers, ec2_files.get(path))
+ if path == "latest/meta-data/":
+ buf = StringIO()
+ for (k, v) in ec2_meta.items():
+ if isinstance(v, (list, tuple)):
+ buf.write("%s/" % (k))
+ else:
+ buf.write("%s" % (k))
+ buf.write("\n")
+ return (200, headers, buf.getvalue())
+ if path.startswith("latest/meta-data/"):
+ value = None
+ pieces = path.split("/")
+ if path.endswith("/"):
+ pieces = pieces[2:-1]
+ value = util.get_cfg_by_path(ec2_meta, pieces)
+ else:
+ pieces = pieces[2:]
+ value = util.get_cfg_by_path(ec2_meta, pieces)
+ if value is not None:
+ return (200, headers, str(value))
+ return (404, headers, "")
+
+ def match_os_uri(uri, headers):
+ path = uri.path.strip("/")
+ if path == "openstack":
+ return (200, headers, "\n".join([openstack.OS_LATEST]))
+ path = uri.path.lstrip("/")
+ if path in os_files:
+ return (200, headers, os_files.get(path))
+ return (404, headers, "")
+
+ def get_request_callback(method, uri, headers):
+ uri = urlparse(uri)
+ path = uri.path.lstrip("/").split("/")
+ if path[0] == "openstack":
+ return match_os_uri(uri, headers)
+ return match_ec2_url(uri, headers)
+
+ hp.register_uri(
+ hp.GET,
+ re.compile(r"http://169.254.169.254/.*"),
+ body=get_request_callback,
+ )
+
+
+def _read_metadata_service():
+ return ds.read_metadata_service(BASE_URL, retries=0, timeout=0.1)
+
+
+class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
+
+ with_logs = True
+ VERSION = "latest"
+
+ def setUp(self):
+ super(TestOpenStackDataSource, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def test_successful(self):
+ _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
+ f = _read_metadata_service()
+ self.assertEqual(VENDOR_DATA, f.get("vendordata"))
+ self.assertEqual(VENDOR_DATA2, f.get("vendordata2"))
+ self.assertEqual(CONTENT_0, f["files"]["/etc/foo.cfg"])
+ self.assertEqual(CONTENT_1, f["files"]["/etc/bar/bar.cfg"])
+ self.assertEqual(2, len(f["files"]))
+ self.assertEqual(USER_DATA, f.get("userdata"))
+ self.assertEqual(EC2_META, f.get("ec2-metadata"))
+ self.assertEqual(2, f.get("version"))
+ metadata = f["metadata"]
+ self.assertEqual("nova", metadata.get("availability_zone"))
+ self.assertEqual("sm-foo-test.novalocal", metadata.get("hostname"))
+ self.assertEqual(
+ "sm-foo-test.novalocal", metadata.get("local-hostname")
+ )
+ self.assertEqual("sm-foo-test", metadata.get("name"))
+ self.assertEqual(
+ "b0fa911b-69d4-4476-bbe2-1c92bff6535c", metadata.get("uuid")
+ )
+ self.assertEqual(
+ "b0fa911b-69d4-4476-bbe2-1c92bff6535c", metadata.get("instance-id")
+ )
+
+ def test_no_ec2(self):
+ _register_uris(self.VERSION, {}, {}, OS_FILES)
+ f = _read_metadata_service()
+ self.assertEqual(VENDOR_DATA, f.get("vendordata"))
+ self.assertEqual(VENDOR_DATA2, f.get("vendordata2"))
+ self.assertEqual(CONTENT_0, f["files"]["/etc/foo.cfg"])
+ self.assertEqual(CONTENT_1, f["files"]["/etc/bar/bar.cfg"])
+ self.assertEqual(USER_DATA, f.get("userdata"))
+ self.assertEqual({}, f.get("ec2-metadata"))
+ self.assertEqual(2, f.get("version"))
+
+ def test_bad_metadata(self):
+ os_files = copy.deepcopy(OS_FILES)
+ for k in list(os_files.keys()):
+ if k.endswith("meta_data.json"):
+ os_files.pop(k, None)
+ _register_uris(self.VERSION, {}, {}, os_files)
+ self.assertRaises(openstack.NonReadable, _read_metadata_service)
+
+ def test_bad_uuid(self):
+ os_files = copy.deepcopy(OS_FILES)
+ os_meta = copy.deepcopy(OSTACK_META)
+ os_meta.pop("uuid")
+ for k in list(os_files.keys()):
+ if k.endswith("meta_data.json"):
+ os_files[k] = json.dumps(os_meta)
+ _register_uris(self.VERSION, {}, {}, os_files)
+ self.assertRaises(BrokenMetadata, _read_metadata_service)
+
+ def test_userdata_empty(self):
+ os_files = copy.deepcopy(OS_FILES)
+ for k in list(os_files.keys()):
+ if k.endswith("user_data"):
+ os_files.pop(k, None)
+ _register_uris(self.VERSION, {}, {}, os_files)
+ f = _read_metadata_service()
+ self.assertEqual(VENDOR_DATA, f.get("vendordata"))
+ self.assertEqual(VENDOR_DATA2, f.get("vendordata2"))
+ self.assertEqual(CONTENT_0, f["files"]["/etc/foo.cfg"])
+ self.assertEqual(CONTENT_1, f["files"]["/etc/bar/bar.cfg"])
+ self.assertFalse(f.get("userdata"))
+
+ def test_vendordata_empty(self):
+ os_files = copy.deepcopy(OS_FILES)
+ for k in list(os_files.keys()):
+ if k.endswith("vendor_data.json"):
+ os_files.pop(k, None)
+ _register_uris(self.VERSION, {}, {}, os_files)
+ f = _read_metadata_service()
+ self.assertEqual(CONTENT_0, f["files"]["/etc/foo.cfg"])
+ self.assertEqual(CONTENT_1, f["files"]["/etc/bar/bar.cfg"])
+ self.assertFalse(f.get("vendordata"))
+
+ def test_vendordata2_empty(self):
+ os_files = copy.deepcopy(OS_FILES)
+ for k in list(os_files.keys()):
+ if k.endswith("vendor_data2.json"):
+ os_files.pop(k, None)
+ _register_uris(self.VERSION, {}, {}, os_files)
+ f = _read_metadata_service()
+ self.assertEqual(CONTENT_0, f["files"]["/etc/foo.cfg"])
+ self.assertEqual(CONTENT_1, f["files"]["/etc/bar/bar.cfg"])
+ self.assertFalse(f.get("vendordata2"))
+
+ def test_vendordata_invalid(self):
+ os_files = copy.deepcopy(OS_FILES)
+ for k in list(os_files.keys()):
+ if k.endswith("vendor_data.json"):
+ os_files[k] = "{" # some invalid json
+ _register_uris(self.VERSION, {}, {}, os_files)
+ self.assertRaises(BrokenMetadata, _read_metadata_service)
+
+ def test_vendordata2_invalid(self):
+ os_files = copy.deepcopy(OS_FILES)
+ for k in list(os_files.keys()):
+ if k.endswith("vendor_data2.json"):
+ os_files[k] = "{" # some invalid json
+ _register_uris(self.VERSION, {}, {}, os_files)
+ self.assertRaises(BrokenMetadata, _read_metadata_service)
+
+ def test_metadata_invalid(self):
+ os_files = copy.deepcopy(OS_FILES)
+ for k in list(os_files.keys()):
+ if k.endswith("meta_data.json"):
+ os_files[k] = "{" # some invalid json
+ _register_uris(self.VERSION, {}, {}, os_files)
+ self.assertRaises(BrokenMetadata, _read_metadata_service)
+
+ @test_helpers.mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ def test_datasource(self, m_dhcp):
+ _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
+ ds_os = ds.DataSourceOpenStack(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ self.assertIsNone(ds_os.version)
+ mock_path = MOCK_PATH + "detect_openstack"
+ with test_helpers.mock.patch(mock_path) as m_detect_os:
+ m_detect_os.return_value = True
+ found = ds_os.get_data()
+ self.assertTrue(found)
+ self.assertEqual(2, ds_os.version)
+ md = dict(ds_os.metadata)
+ md.pop("instance-id", None)
+ md.pop("local-hostname", None)
+ self.assertEqual(OSTACK_META, md)
+ self.assertEqual(EC2_META, ds_os.ec2_metadata)
+ self.assertEqual(USER_DATA, ds_os.userdata_raw)
+ self.assertEqual(2, len(ds_os.files))
+ self.assertEqual(VENDOR_DATA, ds_os.vendordata_pure)
+ self.assertEqual(VENDOR_DATA2, ds_os.vendordata2_pure)
+ self.assertIsNone(ds_os.vendordata_raw)
+ m_dhcp.assert_not_called()
+
+ @hp.activate
+ @test_helpers.mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network")
+ @test_helpers.mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ def test_local_datasource(self, m_dhcp, m_net):
+ """OpenStackLocal calls EphemeralDHCPNetwork and gets instance data."""
+ _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
+ ds_os_local = ds.DataSourceOpenStackLocal(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ ds_os_local._fallback_interface = "eth9" # Monkey patch for dhcp
+ m_dhcp.return_value = [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "broadcast-address": "192.168.2.255",
+ }
+ ]
+
+ self.assertIsNone(ds_os_local.version)
+ mock_path = MOCK_PATH + "detect_openstack"
+ with test_helpers.mock.patch(mock_path) as m_detect_os:
+ m_detect_os.return_value = True
+ found = ds_os_local.get_data()
+ self.assertTrue(found)
+ self.assertEqual(2, ds_os_local.version)
+ md = dict(ds_os_local.metadata)
+ md.pop("instance-id", None)
+ md.pop("local-hostname", None)
+ self.assertEqual(OSTACK_META, md)
+ self.assertEqual(EC2_META, ds_os_local.ec2_metadata)
+ self.assertEqual(USER_DATA, ds_os_local.userdata_raw)
+ self.assertEqual(2, len(ds_os_local.files))
+ self.assertEqual(VENDOR_DATA, ds_os_local.vendordata_pure)
+ self.assertEqual(VENDOR_DATA2, ds_os_local.vendordata2_pure)
+ self.assertIsNone(ds_os_local.vendordata_raw)
+ m_dhcp.assert_called_with("eth9", None)
+
+ def test_bad_datasource_meta(self):
+ os_files = copy.deepcopy(OS_FILES)
+ for k in list(os_files.keys()):
+ if k.endswith("meta_data.json"):
+ os_files[k] = "{" # some invalid json
+ _register_uris(self.VERSION, {}, {}, os_files)
+ ds_os = ds.DataSourceOpenStack(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ self.assertIsNone(ds_os.version)
+ mock_path = MOCK_PATH + "detect_openstack"
+ with test_helpers.mock.patch(mock_path) as m_detect_os:
+ m_detect_os.return_value = True
+ found = ds_os.get_data()
+ self.assertFalse(found)
+ self.assertIsNone(ds_os.version)
+ self.assertIn(
+ "InvalidMetaDataException: Broken metadata address"
+ " http://169.254.169.25",
+ self.logs.getvalue(),
+ )
+
+ def test_no_datasource(self):
+ os_files = copy.deepcopy(OS_FILES)
+ for k in list(os_files.keys()):
+ if k.endswith("meta_data.json"):
+ os_files.pop(k)
+ _register_uris(self.VERSION, {}, {}, os_files)
+ ds_os = ds.DataSourceOpenStack(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ ds_os.ds_cfg = {
+ "max_wait": 0,
+ "timeout": 0,
+ }
+ self.assertIsNone(ds_os.version)
+ mock_path = MOCK_PATH + "detect_openstack"
+ with test_helpers.mock.patch(mock_path) as m_detect_os:
+ m_detect_os.return_value = True
+ found = ds_os.get_data()
+ self.assertFalse(found)
+ self.assertIsNone(ds_os.version)
+
+ def test_network_config_disabled_by_datasource_config(self):
+ """The network_config can be disabled from datasource config."""
+ mock_path = MOCK_PATH + "openstack.convert_net_json"
+ ds_os = ds.DataSourceOpenStack(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ ds_os.ds_cfg = {"apply_network_config": False}
+ sample_json = {
+ "links": [{"ethernet_mac_address": "mymac"}],
+ "networks": [],
+ "services": [],
+ }
+ ds_os.network_json = sample_json # Ignore this content from metadata
+ with test_helpers.mock.patch(mock_path) as m_convert_json:
+ self.assertIsNone(ds_os.network_config)
+ m_convert_json.assert_not_called()
+
+ def test_network_config_from_network_json(self):
+ """The datasource gets network_config from network_data.json."""
+ mock_path = MOCK_PATH + "openstack.convert_net_json"
+ example_cfg = {"version": 1, "config": []}
+ ds_os = ds.DataSourceOpenStack(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ sample_json = {
+ "links": [{"ethernet_mac_address": "mymac"}],
+ "networks": [],
+ "services": [],
+ }
+ ds_os.network_json = sample_json
+ with test_helpers.mock.patch(mock_path) as m_convert_json:
+ m_convert_json.return_value = example_cfg
+ self.assertEqual(example_cfg, ds_os.network_config)
+ self.assertIn(
+ "network config provided via network_json", self.logs.getvalue()
+ )
+ m_convert_json.assert_called_with(sample_json, known_macs=None)
+
+ def test_network_config_cached(self):
+ """The datasource caches the network_config property."""
+ mock_path = MOCK_PATH + "openstack.convert_net_json"
+ example_cfg = {"version": 1, "config": []}
+ ds_os = ds.DataSourceOpenStack(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ ds_os._network_config = example_cfg
+ with test_helpers.mock.patch(mock_path) as m_convert_json:
+ self.assertEqual(example_cfg, ds_os.network_config)
+ m_convert_json.assert_not_called()
+
+ def test_disabled_datasource(self):
+ os_files = copy.deepcopy(OS_FILES)
+ os_meta = copy.deepcopy(OSTACK_META)
+ os_meta["meta"] = {
+ "dsmode": "disabled",
+ }
+ for k in list(os_files.keys()):
+ if k.endswith("meta_data.json"):
+ os_files[k] = json.dumps(os_meta)
+ _register_uris(self.VERSION, {}, {}, os_files)
+ ds_os = ds.DataSourceOpenStack(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ ds_os.ds_cfg = {
+ "max_wait": 0,
+ "timeout": 0,
+ }
+ self.assertIsNone(ds_os.version)
+ mock_path = MOCK_PATH + "detect_openstack"
+ with test_helpers.mock.patch(mock_path) as m_detect_os:
+ m_detect_os.return_value = True
+ found = ds_os.get_data()
+ self.assertFalse(found)
+ self.assertIsNone(ds_os.version)
+
+ @hp.activate
+ def test_wb__crawl_metadata_does_not_persist(self):
+ """_crawl_metadata returns current metadata and does not cache."""
+ _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
+ ds_os = ds.DataSourceOpenStack(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ crawled_data = ds_os._crawl_metadata()
+ self.assertEqual(UNSET, ds_os.ec2_metadata)
+ self.assertIsNone(ds_os.userdata_raw)
+ self.assertEqual(0, len(ds_os.files))
+ self.assertIsNone(ds_os.vendordata_raw)
+ self.assertEqual(
+ [
+ "dsmode",
+ "ec2-metadata",
+ "files",
+ "metadata",
+ "networkdata",
+ "userdata",
+ "vendordata",
+ "vendordata2",
+ "version",
+ ],
+ sorted(crawled_data.keys()),
+ )
+ self.assertEqual("local", crawled_data["dsmode"])
+ self.assertEqual(EC2_META, crawled_data["ec2-metadata"])
+ self.assertEqual(2, len(crawled_data["files"]))
+ md = copy.deepcopy(crawled_data["metadata"])
+ md.pop("instance-id")
+ md.pop("local-hostname")
+ self.assertEqual(OSTACK_META, md)
+ self.assertEqual(
+ json.loads(OS_FILES["openstack/latest/network_data.json"]),
+ crawled_data["networkdata"],
+ )
+ self.assertEqual(USER_DATA, crawled_data["userdata"])
+ self.assertEqual(VENDOR_DATA, crawled_data["vendordata"])
+ self.assertEqual(VENDOR_DATA2, crawled_data["vendordata2"])
+ self.assertEqual(2, crawled_data["version"])
+
+
+class TestVendorDataLoading(test_helpers.TestCase):
+ def cvj(self, data):
+ return convert_vendordata(data)
+
+ def test_vd_load_none(self):
+ # non-existent vendor-data should return None
+ self.assertIsNone(self.cvj(None))
+
+ def test_vd_load_string(self):
+ self.assertEqual(self.cvj("foobar"), "foobar")
+
+ def test_vd_load_list(self):
+ data = [{"foo": "bar"}, "mystring", list(["another", "list"])]
+ self.assertEqual(self.cvj(data), data)
+
+ def test_vd_load_dict_no_ci(self):
+ self.assertIsNone(self.cvj({"foo": "bar"}))
+
+ def test_vd_load_dict_ci_dict(self):
+ self.assertRaises(
+ ValueError, self.cvj, {"foo": "bar", "cloud-init": {"x": 1}}
+ )
+
+ def test_vd_load_dict_ci_string(self):
+ data = {"foo": "bar", "cloud-init": "VENDOR_DATA"}
+ self.assertEqual(self.cvj(data), data["cloud-init"])
+
+ def test_vd_load_dict_ci_list(self):
+ data = {"foo": "bar", "cloud-init": ["VD_1", "VD_2"]}
+ self.assertEqual(self.cvj(data), data["cloud-init"])
+
+
+@test_helpers.mock.patch(MOCK_PATH + "util.is_x86")
+class TestDetectOpenStack(test_helpers.CiTestCase):
+ def test_detect_openstack_non_intel_x86(self, m_is_x86):
+ """Return True on non-intel platforms because dmi isn't conclusive."""
+ m_is_x86.return_value = False
+ self.assertTrue(
+ ds.detect_openstack(), "Expected detect_openstack == True"
+ )
+
+ @test_helpers.mock.patch(MOCK_PATH + "util.get_proc_env")
+ @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
+ def test_not_detect_openstack_intel_x86_ec2(
+ self, m_dmi, m_proc_env, m_is_x86
+ ):
+ """Return False on EC2 platforms."""
+ m_is_x86.return_value = True
+ # No product_name in proc/1/environ
+ m_proc_env.return_value = {"HOME": "/"}
+
+ def fake_dmi_read(dmi_key):
+ if dmi_key == "system-product-name":
+ return "HVM domU" # Nothing 'openstackish' on EC2
+ if dmi_key == "chassis-asset-tag":
+ return "" # Empty string on EC2
+ assert False, "Unexpected dmi read of %s" % dmi_key
+
+ m_dmi.side_effect = fake_dmi_read
+ self.assertFalse(
+ ds.detect_openstack(), "Expected detect_openstack == False on EC2"
+ )
+ m_proc_env.assert_called_with(1)
+
+ @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
+ def test_detect_openstack_intel_product_name_compute(
+ self, m_dmi, m_is_x86
+ ):
+ """Return True on OpenStack compute and nova instances."""
+ m_is_x86.return_value = True
+ openstack_product_names = ["OpenStack Nova", "OpenStack Compute"]
+
+ for product_name in openstack_product_names:
+ m_dmi.return_value = product_name
+ self.assertTrue(
+ ds.detect_openstack(), "Failed to detect_openstack"
+ )
+
+ @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
+ def test_detect_openstack_opentelekomcloud_chassis_asset_tag(
+ self, m_dmi, m_is_x86
+ ):
+ """Return True on OpenStack reporting OpenTelekomCloud asset-tag."""
+ m_is_x86.return_value = True
+
+ def fake_dmi_read(dmi_key):
+ if dmi_key == "system-product-name":
+ return "HVM domU" # Nothing 'openstackish' on OpenTelekomCloud
+ if dmi_key == "chassis-asset-tag":
+ return "OpenTelekomCloud"
+ assert False, "Unexpected dmi read of %s" % dmi_key
+
+ m_dmi.side_effect = fake_dmi_read
+ self.assertTrue(
+ ds.detect_openstack(),
+ "Expected detect_openstack == True on OpenTelekomCloud",
+ )
+
+ @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
+ def test_detect_openstack_sapccloud_chassis_asset_tag(
+ self, m_dmi, m_is_x86
+ ):
+ """Return True on OpenStack reporting SAP CCloud VM asset-tag."""
+ m_is_x86.return_value = True
+
+ def fake_dmi_read(dmi_key):
+ if dmi_key == "system-product-name":
+ return "VMware Virtual Platform" # SAP CCloud uses VMware
+ if dmi_key == "chassis-asset-tag":
+ return "SAP CCloud VM"
+ assert False, "Unexpected dmi read of %s" % dmi_key
+
+ m_dmi.side_effect = fake_dmi_read
+ self.assertTrue(
+ ds.detect_openstack(),
+ "Expected detect_openstack == True on SAP CCloud VM",
+ )
+
+ @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
+ def test_detect_openstack_oraclecloud_chassis_asset_tag(
+ self, m_dmi, m_is_x86
+ ):
+ """Return True on OpenStack reporting Oracle cloud asset-tag."""
+ m_is_x86.return_value = True
+
+ def fake_dmi_read(dmi_key):
+ if dmi_key == "system-product-name":
+ return "Standard PC (i440FX + PIIX, 1996)" # No match
+ if dmi_key == "chassis-asset-tag":
+ return "OracleCloud.com"
+ assert False, "Unexpected dmi read of %s" % dmi_key
+
+ m_dmi.side_effect = fake_dmi_read
+ self.assertTrue(
+ ds.detect_openstack(accept_oracle=True),
+ "Expected detect_openstack == True on OracleCloud.com",
+ )
+ self.assertFalse(
+ ds.detect_openstack(accept_oracle=False),
+ "Expected detect_openstack == False.",
+ )
+
+ def _test_detect_openstack_nova_compute_chassis_asset_tag(
+ self, m_dmi, m_is_x86, chassis_tag
+ ):
+ """Return True on OpenStack reporting generic asset-tag."""
+ m_is_x86.return_value = True
+
+ def fake_dmi_read(dmi_key):
+ if dmi_key == "system-product-name":
+ return "Generic OpenStack Platform"
+ if dmi_key == "chassis-asset-tag":
+ return chassis_tag
+ assert False, "Unexpected dmi read of %s" % dmi_key
+
+ m_dmi.side_effect = fake_dmi_read
+ self.assertTrue(
+ ds.detect_openstack(),
+ "Expected detect_openstack == True on Generic OpenStack Platform",
+ )
+
+ @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
+ def test_detect_openstack_nova_chassis_asset_tag(self, m_dmi, m_is_x86):
+ self._test_detect_openstack_nova_compute_chassis_asset_tag(
+ m_dmi, m_is_x86, "OpenStack Nova"
+ )
+
+ @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
+ def test_detect_openstack_compute_chassis_asset_tag(self, m_dmi, m_is_x86):
+ self._test_detect_openstack_nova_compute_chassis_asset_tag(
+ m_dmi, m_is_x86, "OpenStack Compute"
+ )
+
+ @test_helpers.mock.patch(MOCK_PATH + "util.get_proc_env")
+ @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data")
+ def test_detect_openstack_by_proc_1_environ(
+ self, m_dmi, m_proc_env, m_is_x86
+ ):
+ """Return True when nova product_name specified in /proc/1/environ."""
+ m_is_x86.return_value = True
+ # Nova product_name in proc/1/environ
+ m_proc_env.return_value = {
+ "HOME": "/",
+ "product_name": "OpenStack Nova",
+ }
+
+ def fake_dmi_read(dmi_key):
+ if dmi_key == "system-product-name":
+ return "HVM domU" # Nothing 'openstackish'
+ if dmi_key == "chassis-asset-tag":
+ return "" # Nothin 'openstackish'
+ assert False, "Unexpected dmi read of %s" % dmi_key
+
+ m_dmi.side_effect = fake_dmi_read
+ self.assertTrue(
+ ds.detect_openstack(),
+ "Expected detect_openstack == True on OpenTelekomCloud",
+ )
+ m_proc_env.assert_called_with(1)
+
+
+class TestMetadataReader(test_helpers.HttprettyTestCase):
+ """Test the MetadataReader."""
+
+ burl = "http://169.254.169.254/"
+ md_base = {
+ "availability_zone": "myaz1",
+ "hostname": "sm-foo-test.novalocal",
+ "keys": [{"data": PUBKEY, "name": "brickies", "type": "ssh"}],
+ "launch_index": 0,
+ "name": "sm-foo-test",
+ "public_keys": {"mykey": PUBKEY},
+ "project_id": "6a103f813b774b9fb15a4fcd36e1c056",
+ "uuid": "b0fa911b-69d4-4476-bbe2-1c92bff6535c",
+ }
+
+ def register(self, path, body=None, status=200):
+ content = body if not isinstance(body, str) else body.encode("utf-8")
+ hp.register_uri(
+ hp.GET, self.burl + "openstack" + path, status=status, body=content
+ )
+
+ def register_versions(self, versions):
+ self.register("", "\n".join(versions))
+ self.register("/", "\n".join(versions))
+
+ def register_version(self, version, data):
+ content = "\n".join(sorted(data.keys()))
+ self.register(version, content)
+ self.register(version + "/", content)
+ for path, content in data.items():
+ self.register("/%s/%s" % (version, path), content)
+ self.register("/%s/%s" % (version, path), content)
+ if "user_data" not in data:
+ self.register("/%s/user_data" % version, "nodata", status=404)
+
+ def test__find_working_version(self):
+ """Test a working version ignores unsupported."""
+ unsup = "2016-11-09"
+ self.register_versions(
+ [
+ openstack.OS_FOLSOM,
+ openstack.OS_LIBERTY,
+ unsup,
+ openstack.OS_LATEST,
+ ]
+ )
+ self.assertEqual(
+ openstack.OS_LIBERTY,
+ openstack.MetadataReader(self.burl)._find_working_version(),
+ )
+
+ def test__find_working_version_uses_latest(self):
+ """'latest' should be used if no supported versions."""
+ unsup1, unsup2 = ("2016-11-09", "2017-06-06")
+ self.register_versions([unsup1, unsup2, openstack.OS_LATEST])
+ self.assertEqual(
+ openstack.OS_LATEST,
+ openstack.MetadataReader(self.burl)._find_working_version(),
+ )
+
+ def test_read_v2_os_ocata(self):
+ """Validate return value of read_v2 for os_ocata data."""
+ md = copy.deepcopy(self.md_base)
+ md["devices"] = []
+ network_data = {"links": [], "networks": [], "services": []}
+ vendor_data = {}
+ vendor_data2 = {"static": {}}
+
+ data = {
+ "meta_data.json": json.dumps(md),
+ "network_data.json": json.dumps(network_data),
+ "vendor_data.json": json.dumps(vendor_data),
+ "vendor_data2.json": json.dumps(vendor_data2),
+ }
+
+ self.register_versions([openstack.OS_OCATA, openstack.OS_LATEST])
+ self.register_version(openstack.OS_OCATA, data)
+
+ mock_read_ec2 = test_helpers.mock.MagicMock(
+ return_value={"instance-id": "unused-ec2"}
+ )
+ expected_md = copy.deepcopy(md)
+ expected_md.update(
+ {"instance-id": md["uuid"], "local-hostname": md["hostname"]}
+ )
+ expected = {
+ "userdata": "", # Annoying, no user-data results in empty string.
+ "version": 2,
+ "metadata": expected_md,
+ "vendordata": vendor_data,
+ "vendordata2": vendor_data2,
+ "networkdata": network_data,
+ "ec2-metadata": mock_read_ec2.return_value,
+ "files": {},
+ }
+ reader = openstack.MetadataReader(self.burl)
+ reader._read_ec2_metadata = mock_read_ec2
+ self.assertEqual(expected, reader.read_v2())
+ self.assertEqual(1, mock_read_ec2.call_count)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_oracle.py b/tests/unittests/sources/test_oracle.py
new file mode 100644
index 00000000..b3e6f10c
--- /dev/null
+++ b/tests/unittests/sources/test_oracle.py
@@ -0,0 +1,933 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import base64
+import copy
+import json
+from contextlib import ExitStack
+from unittest import mock
+
+import pytest
+
+from cloudinit.sources import DataSourceOracle as oracle
+from cloudinit.sources import NetworkConfigSource
+from cloudinit.sources.DataSourceOracle import OpcMetadata
+from cloudinit.url_helper import UrlError
+from tests.unittests import helpers as test_helpers
+
+DS_PATH = "cloudinit.sources.DataSourceOracle"
+
+# `curl -L http://169.254.169.254/opc/v1/vnics/` on an Oracle Bare Metal Machine
+# with a secondary VNIC attached (vnicId truncated for Python line length)
+OPC_BM_SECONDARY_VNIC_RESPONSE = """\
+[ {
+ "vnicId" : "ocid1.vnic.oc1.phx.abyhqljtyvcucqkhdqmgjszebxe4hrb!!TRUNCATED||",
+ "privateIp" : "10.0.0.8",
+ "vlanTag" : 0,
+ "macAddr" : "90:e2:ba:d4:f1:68",
+ "virtualRouterIp" : "10.0.0.1",
+ "subnetCidrBlock" : "10.0.0.0/24",
+ "nicIndex" : 0
+}, {
+ "vnicId" : "ocid1.vnic.oc1.phx.abyhqljtfmkxjdy2sqidndiwrsg63zf!!TRUNCATED||",
+ "privateIp" : "10.0.4.5",
+ "vlanTag" : 1,
+ "macAddr" : "02:00:17:05:CF:51",
+ "virtualRouterIp" : "10.0.4.1",
+ "subnetCidrBlock" : "10.0.4.0/24",
+ "nicIndex" : 0
+} ]"""
+
+# `curl -L http://169.254.169.254/opc/v1/vnics/` on an Oracle Virtual Machine
+# with a secondary VNIC attached
+OPC_VM_SECONDARY_VNIC_RESPONSE = """\
+[ {
+ "vnicId" : "ocid1.vnic.oc1.phx.abyhqljtch72z5pd76cc2636qeqh7z_truncated",
+ "privateIp" : "10.0.0.230",
+ "vlanTag" : 1039,
+ "macAddr" : "02:00:17:05:D1:DB",
+ "virtualRouterIp" : "10.0.0.1",
+ "subnetCidrBlock" : "10.0.0.0/24"
+}, {
+ "vnicId" : "ocid1.vnic.oc1.phx.abyhqljt4iew3gwmvrwrhhf3bp5drj_truncated",
+ "privateIp" : "10.0.0.231",
+ "vlanTag" : 1041,
+ "macAddr" : "00:00:17:02:2B:B1",
+ "virtualRouterIp" : "10.0.0.1",
+ "subnetCidrBlock" : "10.0.0.0/24"
+} ]"""
+
+
+# Fetched with `curl http://169.254.169.254/opc/v1/instance/` (and then
+# truncated for line length)
+OPC_V2_METADATA = """\
+{
+ "availabilityDomain" : "qIZq:PHX-AD-1",
+ "faultDomain" : "FAULT-DOMAIN-2",
+ "compartmentId" : "ocid1.tenancy.oc1..aaaaaaaao7f7cccogqrg5emjxkxmTRUNCATED",
+ "displayName" : "instance-20200320-1400",
+ "hostname" : "instance-20200320-1400",
+ "id" : "ocid1.instance.oc1.phx.anyhqljtniwq6syc3nex55sep5w34qbwmw6TRUNCATED",
+ "image" : "ocid1.image.oc1.phx.aaaaaaaagmkn4gdhvvx24kiahh2b2qchsicTRUNCATED",
+ "metadata" : {
+ "ssh_authorized_keys" : "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ truncated",
+ "user_data" : "IyEvYmluL3NoCnRvdWNoIC90bXAvZm9v"
+ },
+ "region" : "phx",
+ "canonicalRegionName" : "us-phoenix-1",
+ "ociAdName" : "phx-ad-3",
+ "shape" : "VM.Standard2.1",
+ "state" : "Running",
+ "timeCreated" : 1584727285318,
+ "agentConfig" : {
+ "monitoringDisabled" : true,
+ "managementDisabled" : true
+ }
+}"""
+
+# Just a small, meaningless change to differentiate the two metadata versions
+OPC_V1_METADATA = OPC_V2_METADATA.replace("ocid1.instance", "ocid2.instance")
+
+
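+# Default metadata version consumed by the oracle_ds fixture below; individual
+# tests override it via @pytest.mark.parametrize("metadata_version", [...]).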
+@pytest.fixture
+def metadata_version():
+ return 2
+
+
+@pytest.fixture
+def oracle_ds(request, fixture_utils, paths, metadata_version):
+ """
+ Return an instantiated DataSourceOracle.
+
+ This also performs the mocking required for the default test case:
+ * ``_read_system_uuid`` returns something,
+ * ``_is_platform_viable`` returns True,
+ * ``_is_iscsi_root`` returns True (the simpler code path),
+    * ``read_opc_metadata`` returns ``OPC_V2_METADATA``
+
+ (This uses the paths fixture for the required helpers.Paths object, and the
+ fixture_utils fixture for fetching markers.)
+ """
+ sys_cfg = fixture_utils.closest_marker_first_arg_or(
+ request, "ds_sys_cfg", mock.MagicMock()
+ )
+ metadata = OpcMetadata(metadata_version, json.loads(OPC_V2_METADATA), None)
+ with mock.patch(DS_PATH + "._read_system_uuid", return_value="someuuid"):
+ with mock.patch(DS_PATH + "._is_platform_viable", return_value=True):
+ with mock.patch(DS_PATH + "._is_iscsi_root", return_value=True):
+ with mock.patch(
+ DS_PATH + ".read_opc_metadata",
+ return_value=metadata,
+ ):
+ yield oracle.DataSourceOracle(
+ sys_cfg=sys_cfg,
+ distro=mock.Mock(),
+ paths=paths,
+ )
+
+
+class TestDataSourceOracle:
+ def test_platform_info(self, oracle_ds):
+ assert "oracle" == oracle_ds.cloud_name
+ assert "oracle" == oracle_ds.platform_type
+
+ def test_subplatform_before_fetch(self, oracle_ds):
+ assert "unknown" == oracle_ds.subplatform
+
+ def test_platform_info_after_fetch(self, oracle_ds):
+ oracle_ds._get_data()
+ assert (
+ "metadata (http://169.254.169.254/opc/v2/)"
+ == oracle_ds.subplatform
+ )
+
+ @pytest.mark.parametrize("metadata_version", [1])
+ def test_v1_platform_info_after_fetch(self, oracle_ds):
+ oracle_ds._get_data()
+ assert (
+ "metadata (http://169.254.169.254/opc/v1/)"
+ == oracle_ds.subplatform
+ )
+
+ def test_secondary_nics_disabled_by_default(self, oracle_ds):
+ assert not oracle_ds.ds_cfg["configure_secondary_nics"]
+
+ @pytest.mark.ds_sys_cfg(
+ {"datasource": {"Oracle": {"configure_secondary_nics": True}}}
+ )
+ def test_sys_cfg_can_enable_configure_secondary_nics(self, oracle_ds):
+ assert oracle_ds.ds_cfg["configure_secondary_nics"]
+
+
+class TestIsPlatformViable(test_helpers.CiTestCase):
+ @mock.patch(
+ DS_PATH + ".dmi.read_dmi_data", return_value=oracle.CHASSIS_ASSET_TAG
+ )
+ def test_expected_viable(self, m_read_dmi_data):
+ """System with known chassis tag is viable."""
+ self.assertTrue(oracle._is_platform_viable())
+ m_read_dmi_data.assert_has_calls([mock.call("chassis-asset-tag")])
+
+ @mock.patch(DS_PATH + ".dmi.read_dmi_data", return_value=None)
+ def test_expected_not_viable_dmi_data_none(self, m_read_dmi_data):
+ """System without known chassis tag is not viable."""
+ self.assertFalse(oracle._is_platform_viable())
+ m_read_dmi_data.assert_has_calls([mock.call("chassis-asset-tag")])
+
+ @mock.patch(DS_PATH + ".dmi.read_dmi_data", return_value="LetsGoCubs")
+ def test_expected_not_viable_other(self, m_read_dmi_data):
+ """System with unnown chassis tag is not viable."""
+ self.assertFalse(oracle._is_platform_viable())
+ m_read_dmi_data.assert_has_calls([mock.call("chassis-asset-tag")])
+
+
+@mock.patch(
+ "cloudinit.net.is_openvswitch_internal_interface",
+ mock.Mock(return_value=False),
+)
+class TestNetworkConfigFromOpcImds:
+ def test_no_secondary_nics_does_not_mutate_input(self, oracle_ds):
+ oracle_ds._vnics_data = [{}]
+        # We test this by using a non-dict to ensure that no dict
+ # operations are used; failure would be seen as exceptions
+ oracle_ds._network_config = object()
+ oracle_ds._add_network_config_from_opc_imds()
+
+ def test_bare_metal_machine_skipped(self, oracle_ds, caplog):
+ # nicIndex in the first entry indicates a bare metal machine
+ oracle_ds._vnics_data = json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE)
+ # We test this by using a non-dict to ensure that no dict
+ # operations are used
+ oracle_ds._network_config = object()
+ oracle_ds._add_network_config_from_opc_imds()
+ assert "bare metal machine" in caplog.text
+
+ def test_missing_mac_skipped(self, oracle_ds, caplog):
+ oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
+
+ oracle_ds._network_config = {
+ "version": 1,
+ "config": [{"primary": "nic"}],
+ }
+ with mock.patch(DS_PATH + ".get_interfaces_by_mac", return_value={}):
+ oracle_ds._add_network_config_from_opc_imds()
+
+ assert 1 == len(oracle_ds.network_config["config"])
+ assert (
+ "Interface with MAC 00:00:17:02:2b:b1 not found; skipping"
+ in caplog.text
+ )
+
+ def test_missing_mac_skipped_v2(self, oracle_ds, caplog):
+ oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
+
+ oracle_ds._network_config = {
+ "version": 2,
+ "ethernets": {"primary": {"nic": {}}},
+ }
+ with mock.patch(DS_PATH + ".get_interfaces_by_mac", return_value={}):
+ oracle_ds._add_network_config_from_opc_imds()
+
+ assert 1 == len(oracle_ds.network_config["ethernets"])
+ assert (
+ "Interface with MAC 00:00:17:02:2b:b1 not found; skipping"
+ in caplog.text
+ )
+
+ def test_secondary_nic(self, oracle_ds):
+ oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
+ oracle_ds._network_config = {
+ "version": 1,
+ "config": [{"primary": "nic"}],
+ }
+ mac_addr, nic_name = "00:00:17:02:2b:b1", "ens3"
+ with mock.patch(
+ DS_PATH + ".get_interfaces_by_mac",
+ return_value={mac_addr: nic_name},
+ ):
+ oracle_ds._add_network_config_from_opc_imds()
+
+ # The input is mutated
+ assert 2 == len(oracle_ds.network_config["config"])
+
+ secondary_nic_cfg = oracle_ds.network_config["config"][1]
+ assert nic_name == secondary_nic_cfg["name"]
+ assert "physical" == secondary_nic_cfg["type"]
+ assert mac_addr == secondary_nic_cfg["mac_address"]
+ assert 9000 == secondary_nic_cfg["mtu"]
+
+ assert 1 == len(secondary_nic_cfg["subnets"])
+ subnet_cfg = secondary_nic_cfg["subnets"][0]
+ # These values are hard-coded in OPC_VM_SECONDARY_VNIC_RESPONSE
+ assert "10.0.0.231" == subnet_cfg["address"]
+
+ def test_secondary_nic_v2(self, oracle_ds):
+ oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
+ oracle_ds._network_config = {
+ "version": 2,
+ "ethernets": {"primary": {"nic": {}}},
+ }
+ mac_addr, nic_name = "00:00:17:02:2b:b1", "ens3"
+ with mock.patch(
+ DS_PATH + ".get_interfaces_by_mac",
+ return_value={mac_addr: nic_name},
+ ):
+ oracle_ds._add_network_config_from_opc_imds()
+
+ # The input is mutated
+ assert 2 == len(oracle_ds.network_config["ethernets"])
+
+ secondary_nic_cfg = oracle_ds.network_config["ethernets"]["ens3"]
+ assert secondary_nic_cfg["dhcp4"] is False
+ assert secondary_nic_cfg["dhcp6"] is False
+ assert mac_addr == secondary_nic_cfg["match"]["macaddress"]
+ assert 9000 == secondary_nic_cfg["mtu"]
+
+ assert 1 == len(secondary_nic_cfg["addresses"])
+ # These values are hard-coded in OPC_VM_SECONDARY_VNIC_RESPONSE
+ assert "10.0.0.231" == secondary_nic_cfg["addresses"][0]
+
+
+class TestNetworkConfigFiltersNetFailover(test_helpers.CiTestCase):
+ def setUp(self):
+ super(TestNetworkConfigFiltersNetFailover, self).setUp()
+ self.add_patch(
+ DS_PATH + ".get_interfaces_by_mac", "m_get_interfaces_by_mac"
+ )
+ self.add_patch(DS_PATH + ".is_netfail_master", "m_netfail_master")
+
+ def test_ignore_bogus_network_config(self):
+ netcfg = {"something": "here"}
+ passed_netcfg = copy.copy(netcfg)
+ oracle._ensure_netfailover_safe(passed_netcfg)
+ self.assertEqual(netcfg, passed_netcfg)
+
+ def test_ignore_network_config_unknown_versions(self):
+ netcfg = {"something": "here", "version": 3}
+ passed_netcfg = copy.copy(netcfg)
+ oracle._ensure_netfailover_safe(passed_netcfg)
+ self.assertEqual(netcfg, passed_netcfg)
+
+ def test_checks_v1_type_physical_interfaces(self):
+ mac_addr, nic_name = "00:00:17:02:2b:b1", "ens3"
+ self.m_get_interfaces_by_mac.return_value = {
+ mac_addr: nic_name,
+ }
+ netcfg = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": nic_name,
+ "mac_address": mac_addr,
+ "subnets": [{"type": "dhcp4"}],
+ }
+ ],
+ }
+ passed_netcfg = copy.copy(netcfg)
+ self.m_netfail_master.return_value = False
+ oracle._ensure_netfailover_safe(passed_netcfg)
+ self.assertEqual(netcfg, passed_netcfg)
+ self.assertEqual(
+ [mock.call(nic_name)], self.m_netfail_master.call_args_list
+ )
+
+ def test_checks_v1_skips_non_phys_interfaces(self):
+ mac_addr, nic_name = "00:00:17:02:2b:b1", "bond0"
+ self.m_get_interfaces_by_mac.return_value = {
+ mac_addr: nic_name,
+ }
+ netcfg = {
+ "version": 1,
+ "config": [
+ {
+ "type": "bond",
+ "name": nic_name,
+ "mac_address": mac_addr,
+ "subnets": [{"type": "dhcp4"}],
+ }
+ ],
+ }
+ passed_netcfg = copy.copy(netcfg)
+ oracle._ensure_netfailover_safe(passed_netcfg)
+ self.assertEqual(netcfg, passed_netcfg)
+ self.assertEqual(0, self.m_netfail_master.call_count)
+
+ def test_removes_master_mac_property_v1(self):
+ nic_master, mac_master = "ens3", self.random_string()
+ nic_other, mac_other = "ens7", self.random_string()
+ nic_extra, mac_extra = "enp0s1f2", self.random_string()
+ self.m_get_interfaces_by_mac.return_value = {
+ mac_master: nic_master,
+ mac_other: nic_other,
+ mac_extra: nic_extra,
+ }
+ netcfg = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": nic_master,
+ "mac_address": mac_master,
+ },
+ {
+ "type": "physical",
+ "name": nic_other,
+ "mac_address": mac_other,
+ },
+ {
+ "type": "physical",
+ "name": nic_extra,
+ "mac_address": mac_extra,
+ },
+ ],
+ }
+
+ def _is_netfail_master(iface):
+ if iface == "ens3":
+ return True
+ return False
+
+ self.m_netfail_master.side_effect = _is_netfail_master
+ expected_cfg = {
+ "version": 1,
+ "config": [
+ {"type": "physical", "name": nic_master},
+ {
+ "type": "physical",
+ "name": nic_other,
+ "mac_address": mac_other,
+ },
+ {
+ "type": "physical",
+ "name": nic_extra,
+ "mac_address": mac_extra,
+ },
+ ],
+ }
+ oracle._ensure_netfailover_safe(netcfg)
+ self.assertEqual(expected_cfg, netcfg)
+
+ def test_checks_v2_type_ethernet_interfaces(self):
+ mac_addr, nic_name = "00:00:17:02:2b:b1", "ens3"
+ self.m_get_interfaces_by_mac.return_value = {
+ mac_addr: nic_name,
+ }
+ netcfg = {
+ "version": 2,
+ "ethernets": {
+ nic_name: {
+ "dhcp4": True,
+ "critical": True,
+ "set-name": nic_name,
+ "match": {"macaddress": mac_addr},
+ }
+ },
+ }
+ passed_netcfg = copy.copy(netcfg)
+ self.m_netfail_master.return_value = False
+ oracle._ensure_netfailover_safe(passed_netcfg)
+ self.assertEqual(netcfg, passed_netcfg)
+ self.assertEqual(
+ [mock.call(nic_name)], self.m_netfail_master.call_args_list
+ )
+
+ def test_skips_v2_non_ethernet_interfaces(self):
+ mac_addr, nic_name = "00:00:17:02:2b:b1", "wlps0"
+ self.m_get_interfaces_by_mac.return_value = {
+ mac_addr: nic_name,
+ }
+ netcfg = {
+ "version": 2,
+ "wifis": {
+ nic_name: {
+ "dhcp4": True,
+ "critical": True,
+ "set-name": nic_name,
+ "match": {"macaddress": mac_addr},
+ }
+ },
+ }
+ passed_netcfg = copy.copy(netcfg)
+ oracle._ensure_netfailover_safe(passed_netcfg)
+ self.assertEqual(netcfg, passed_netcfg)
+ self.assertEqual(0, self.m_netfail_master.call_count)
+
+ def test_removes_master_mac_property_v2(self):
+ nic_master, mac_master = "ens3", self.random_string()
+ nic_other, mac_other = "ens7", self.random_string()
+ nic_extra, mac_extra = "enp0s1f2", self.random_string()
+ self.m_get_interfaces_by_mac.return_value = {
+ mac_master: nic_master,
+ mac_other: nic_other,
+ mac_extra: nic_extra,
+ }
+ netcfg = {
+ "version": 2,
+ "ethernets": {
+ nic_extra: {
+ "dhcp4": True,
+ "set-name": nic_extra,
+ "match": {"macaddress": mac_extra},
+ },
+ nic_other: {
+ "dhcp4": True,
+ "set-name": nic_other,
+ "match": {"macaddress": mac_other},
+ },
+ nic_master: {
+ "dhcp4": True,
+ "set-name": nic_master,
+ "match": {"macaddress": mac_master},
+ },
+ },
+ }
+
+ def _is_netfail_master(iface):
+ if iface == "ens3":
+ return True
+ return False
+
+ self.m_netfail_master.side_effect = _is_netfail_master
+
+ expected_cfg = {
+ "version": 2,
+ "ethernets": {
+ nic_master: {"dhcp4": True, "match": {"name": nic_master}},
+ nic_extra: {
+ "dhcp4": True,
+ "set-name": nic_extra,
+ "match": {"macaddress": mac_extra},
+ },
+ nic_other: {
+ "dhcp4": True,
+ "set-name": nic_other,
+ "match": {"macaddress": mac_other},
+ },
+ },
+ }
+ oracle._ensure_netfailover_safe(netcfg)
+ self.assertEqual(expected_cfg, netcfg)
+
+
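+# Register httpretty handlers for the v2 IMDS endpoints; the callbacks also
+# assert that the "Authorization: Bearer Oracle" header required by v2 is
+# present on each request.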
+def _mock_v2_urls(httpretty):
+ def instance_callback(request, uri, response_headers):
+ assert request.headers.get("Authorization") == "Bearer Oracle"
+ return [200, response_headers, OPC_V2_METADATA]
+
+ def vnics_callback(request, uri, response_headers):
+ assert request.headers.get("Authorization") == "Bearer Oracle"
+ return [200, response_headers, OPC_BM_SECONDARY_VNIC_RESPONSE]
+
+ httpretty.register_uri(
+ httpretty.GET,
+ "http://169.254.169.254/opc/v2/instance/",
+ body=instance_callback,
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "http://169.254.169.254/opc/v2/vnics/",
+ body=vnics_callback,
+ )
+
+
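+# Simulate an instance without v2 support: the v2 instance endpoint returns
+# 404, so the reader must fall back to the v1 endpoints.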
+def _mock_no_v2_urls(httpretty):
+ httpretty.register_uri(
+ httpretty.GET,
+ "http://169.254.169.254/opc/v2/instance/",
+ status=404,
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "http://169.254.169.254/opc/v1/instance/",
+ body=OPC_V1_METADATA,
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "http://169.254.169.254/opc/v1/vnics/",
+ body=OPC_BM_SECONDARY_VNIC_RESPONSE,
+ )
+
+
+class TestReadOpcMetadata:
+ # See https://docs.pytest.org/en/stable/example
+ # /parametrize.html#parametrizing-conditional-raising
+ does_not_raise = ExitStack
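+    # An ExitStack with nothing registered is a no-op context manager, so
+    # does_not_raise() stands in for "no exception expected".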
+
+ @mock.patch("cloudinit.url_helper.time.sleep", lambda _: None)
+ @pytest.mark.parametrize(
+ "version,setup_urls,instance_data,fetch_vnics,vnics_data",
+ [
+ (
+ 2,
+ _mock_v2_urls,
+ json.loads(OPC_V2_METADATA),
+ True,
+ json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE),
+ ),
+ (2, _mock_v2_urls, json.loads(OPC_V2_METADATA), False, None),
+ (
+ 1,
+ _mock_no_v2_urls,
+ json.loads(OPC_V1_METADATA),
+ True,
+ json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE),
+ ),
+ (1, _mock_no_v2_urls, json.loads(OPC_V1_METADATA), False, None),
+ ],
+ )
+ def test_metadata_returned(
+ self,
+ version,
+ setup_urls,
+ instance_data,
+ fetch_vnics,
+ vnics_data,
+ httpretty,
+ ):
+ setup_urls(httpretty)
+ metadata = oracle.read_opc_metadata(fetch_vnics_data=fetch_vnics)
+
+ assert version == metadata.version
+ assert instance_data == metadata.instance_data
+ assert vnics_data == metadata.vnics_data
+
+ # No need to actually wait between retries in the tests
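+    # From the parametrize table below: up to two v2 failures still yield the
+    # v2 body, three v2 failures fall back to v1, and three failures on both
+    # endpoints exhaust the retries and raise UrlError.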
+ @mock.patch("cloudinit.url_helper.time.sleep", lambda _: None)
+ @pytest.mark.parametrize(
+ "v2_failure_count,v1_failure_count,expected_body,expectation",
+ [
+ (1, 0, json.loads(OPC_V2_METADATA), does_not_raise()),
+ (2, 0, json.loads(OPC_V2_METADATA), does_not_raise()),
+ (3, 0, json.loads(OPC_V1_METADATA), does_not_raise()),
+ (3, 1, json.loads(OPC_V1_METADATA), does_not_raise()),
+ (3, 2, json.loads(OPC_V1_METADATA), does_not_raise()),
+ (3, 3, None, pytest.raises(UrlError)),
+ ],
+ )
+ def test_retries(
+ self,
+ v2_failure_count,
+ v1_failure_count,
+ expected_body,
+ expectation,
+ httpretty,
+ ):
+ v2_responses = [httpretty.Response("", status=404)] * v2_failure_count
+ v2_responses.append(httpretty.Response(OPC_V2_METADATA))
+ v1_responses = [httpretty.Response("", status=404)] * v1_failure_count
+ v1_responses.append(httpretty.Response(OPC_V1_METADATA))
+
+ httpretty.register_uri(
+ httpretty.GET,
+ "http://169.254.169.254/opc/v1/instance/",
+ responses=v1_responses,
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "http://169.254.169.254/opc/v2/instance/",
+ responses=v2_responses,
+ )
+ with expectation:
+ assert expected_body == oracle.read_opc_metadata().instance_data
+
+
+class TestCommon_GetDataBehaviour:
+ """This test class tests behaviour common to iSCSI and non-iSCSI root.
+
+ It defines a fixture, parameterized_oracle_ds, which is used in all the
+ tests herein to test that the commonly expected behaviour is the same with
+ iSCSI root and without.
+
+    (As non-iSCSI root behaviour is a superset of iSCSI root behaviour, this
+    class is implicitly also testing all iSCSI root behaviour, so there is no
+    separate class for that case.)
+ """
+
+ @pytest.fixture(params=[True, False])
+ def parameterized_oracle_ds(self, request, oracle_ds):
+ """oracle_ds parameterized for iSCSI and non-iSCSI root respectively"""
+ is_iscsi_root = request.param
+ with ExitStack() as stack:
+ stack.enter_context(
+ mock.patch(
+ DS_PATH + "._is_iscsi_root", return_value=is_iscsi_root
+ )
+ )
+ if not is_iscsi_root:
+ stack.enter_context(
+ mock.patch(DS_PATH + ".net.find_fallback_nic")
+ )
+ stack.enter_context(
+ mock.patch(DS_PATH + ".dhcp.EphemeralDHCPv4")
+ )
+ yield oracle_ds
+
+ @mock.patch(
+ DS_PATH + "._is_platform_viable", mock.Mock(return_value=False)
+ )
+ def test_false_if_platform_not_viable(
+ self,
+ parameterized_oracle_ds,
+ ):
+ assert not parameterized_oracle_ds._get_data()
+
+ @pytest.mark.parametrize(
+ "keyname,expected_value",
+ (
+ ("availability-zone", "phx-ad-3"),
+ ("launch-index", 0),
+ ("local-hostname", "instance-20200320-1400"),
+ (
+ "instance-id",
+ "ocid1.instance.oc1.phx"
+ ".anyhqljtniwq6syc3nex55sep5w34qbwmw6TRUNCATED",
+ ),
+ ("name", "instance-20200320-1400"),
+ (
+ "public_keys",
+ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ truncated",
+ ),
+ ),
+ )
+ def test_metadata_keys_set_correctly(
+ self,
+ keyname,
+ expected_value,
+ parameterized_oracle_ds,
+ ):
+ assert parameterized_oracle_ds._get_data()
+ assert expected_value == parameterized_oracle_ds.metadata[keyname]
+
+ @pytest.mark.parametrize(
+ "attribute_name,expected_value",
+ [
+ ("_crawled_metadata", json.loads(OPC_V2_METADATA)),
+ (
+ "userdata_raw",
+ base64.b64decode(b"IyEvYmluL3NoCnRvdWNoIC90bXAvZm9v"),
+ ),
+ ("system_uuid", "my-test-uuid"),
+ ],
+ )
+ @mock.patch(
+ DS_PATH + "._read_system_uuid", mock.Mock(return_value="my-test-uuid")
+ )
+ def test_attributes_set_correctly(
+ self,
+ attribute_name,
+ expected_value,
+ parameterized_oracle_ds,
+ ):
+ assert parameterized_oracle_ds._get_data()
+ assert expected_value == getattr(
+ parameterized_oracle_ds, attribute_name
+ )
+
+ @pytest.mark.parametrize(
+ "ssh_keys,expected_value",
+ [
+ # No SSH keys in metadata => no keys detected
+ (None, []),
+ # Empty SSH keys in metadata => no keys detected
+ ("", []),
+ # Single SSH key in metadata => single key detected
+ ("ssh-rsa ... test@test", ["ssh-rsa ... test@test"]),
+ # Multiple SSH keys in metadata => multiple keys detected
+ (
+ "ssh-rsa ... test@test\nssh-rsa ... test2@test2",
+ ["ssh-rsa ... test@test", "ssh-rsa ... test2@test2"],
+ ),
+ ],
+ )
+ def test_public_keys_handled_correctly(
+ self, ssh_keys, expected_value, parameterized_oracle_ds
+ ):
+ instance_data = json.loads(OPC_V1_METADATA)
+ if ssh_keys is None:
+ del instance_data["metadata"]["ssh_authorized_keys"]
+ else:
+ instance_data["metadata"]["ssh_authorized_keys"] = ssh_keys
+ metadata = OpcMetadata(None, instance_data, None)
+ with mock.patch(
+ DS_PATH + ".read_opc_metadata",
+ mock.Mock(return_value=metadata),
+ ):
+ assert parameterized_oracle_ds._get_data()
+ assert (
+ expected_value == parameterized_oracle_ds.get_public_ssh_keys()
+ )
+
+ def test_missing_user_data_handled_gracefully(
+ self, parameterized_oracle_ds
+ ):
+ instance_data = json.loads(OPC_V1_METADATA)
+ del instance_data["metadata"]["user_data"]
+ metadata = OpcMetadata(None, instance_data, None)
+ with mock.patch(
+ DS_PATH + ".read_opc_metadata",
+ mock.Mock(return_value=metadata),
+ ):
+ assert parameterized_oracle_ds._get_data()
+
+ assert parameterized_oracle_ds.userdata_raw is None
+
+ def test_missing_metadata_handled_gracefully(
+ self, parameterized_oracle_ds
+ ):
+ instance_data = json.loads(OPC_V1_METADATA)
+ del instance_data["metadata"]
+ metadata = OpcMetadata(None, instance_data, None)
+ with mock.patch(
+ DS_PATH + ".read_opc_metadata",
+ mock.Mock(return_value=metadata),
+ ):
+ assert parameterized_oracle_ds._get_data()
+
+ assert parameterized_oracle_ds.userdata_raw is None
+ assert [] == parameterized_oracle_ds.get_public_ssh_keys()
+
+
+@mock.patch(DS_PATH + "._is_iscsi_root", lambda: False)
+class TestNonIscsiRoot_GetDataBehaviour:
+ @mock.patch(DS_PATH + ".dhcp.EphemeralDHCPv4")
+ @mock.patch(DS_PATH + ".net.find_fallback_nic")
+ def test_read_opc_metadata_called_with_ephemeral_dhcp(
+ self, m_find_fallback_nic, m_EphemeralDHCPv4, oracle_ds
+ ):
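+        # Track whether read_opc_metadata is invoked while the
+        # EphemeralDHCPv4 context manager is active; the enter/exit side
+        # effects below flip this flag.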
+ in_context_manager = False
+
+ def enter_context_manager():
+ nonlocal in_context_manager
+ in_context_manager = True
+
+ def exit_context_manager(*args):
+ nonlocal in_context_manager
+ in_context_manager = False
+
+ m_EphemeralDHCPv4.return_value.__enter__.side_effect = (
+ enter_context_manager
+ )
+ m_EphemeralDHCPv4.return_value.__exit__.side_effect = (
+ exit_context_manager
+ )
+
+ def assert_in_context_manager(**kwargs):
+ assert in_context_manager
+ return mock.MagicMock()
+
+ with mock.patch(
+ DS_PATH + ".read_opc_metadata",
+ mock.Mock(side_effect=assert_in_context_manager),
+ ):
+ assert oracle_ds._get_data()
+
+ assert [
+ mock.call(
+ iface=m_find_fallback_nic.return_value,
+ connectivity_url_data={
+ "headers": {"Authorization": "Bearer Oracle"},
+ "url": "http://169.254.169.254/opc/v2/instance/",
+ },
+ )
+ ] == m_EphemeralDHCPv4.call_args_list
+
+
+@mock.patch(DS_PATH + ".get_interfaces_by_mac", lambda: {})
+@mock.patch(DS_PATH + ".cmdline.read_initramfs_config")
+class TestNetworkConfig:
+ def test_network_config_cached(self, m_read_initramfs_config, oracle_ds):
+ """.network_config should be cached"""
+ assert 0 == m_read_initramfs_config.call_count
+ oracle_ds.network_config # pylint: disable=pointless-statement
+ assert 1 == m_read_initramfs_config.call_count
+ oracle_ds.network_config # pylint: disable=pointless-statement
+ assert 1 == m_read_initramfs_config.call_count
+
+ def test_network_cmdline(self, m_read_initramfs_config, oracle_ds):
+ """network_config should prefer initramfs config over fallback"""
+ ncfg = {"version": 1, "config": [{"a": "b"}]}
+ m_read_initramfs_config.return_value = copy.deepcopy(ncfg)
+
+ assert ncfg == oracle_ds.network_config
+ assert 0 == oracle_ds.distro.generate_fallback_config.call_count
+
+ def test_network_fallback(self, m_read_initramfs_config, oracle_ds):
+ """network_config should prefer initramfs config over fallback"""
+ ncfg = {"version": 1, "config": [{"a": "b"}]}
+
+ m_read_initramfs_config.return_value = None
+ oracle_ds.distro.generate_fallback_config.return_value = copy.deepcopy(
+ ncfg
+ )
+
+ assert ncfg == oracle_ds.network_config
+
+ @pytest.mark.parametrize(
+ "configure_secondary_nics,expect_secondary_nics",
+ [(True, True), (False, False), (None, False)],
+ )
+ def test_secondary_nic_addition(
+ self,
+ m_read_initramfs_config,
+ configure_secondary_nics,
+ expect_secondary_nics,
+ oracle_ds,
+ ):
+ """Test that _add_network_config_from_opc_imds is called as expected
+
+ (configure_secondary_nics=None is used to test the default behaviour.)
+ """
+ m_read_initramfs_config.return_value = {"version": 1, "config": []}
+
+ if configure_secondary_nics is not None:
+ oracle_ds.ds_cfg[
+ "configure_secondary_nics"
+ ] = configure_secondary_nics
+
+ def side_effect(self):
+ self._network_config["secondary_added"] = mock.sentinel.needle
+
+ oracle_ds._vnics_data = "DummyData"
+ with mock.patch.object(
+ oracle.DataSourceOracle,
+ "_add_network_config_from_opc_imds",
+ new=side_effect,
+ ):
+ was_secondary_added = "secondary_added" in oracle_ds.network_config
+ assert expect_secondary_nics == was_secondary_added
+
+ def test_secondary_nic_failure_isnt_blocking(
+ self,
+ m_read_initramfs_config,
+ caplog,
+ oracle_ds,
+ ):
+ oracle_ds.ds_cfg["configure_secondary_nics"] = True
+ oracle_ds._vnics_data = "DummyData"
+
+ with mock.patch.object(
+ oracle.DataSourceOracle,
+ "_add_network_config_from_opc_imds",
+ side_effect=Exception(),
+ ):
+ network_config = oracle_ds.network_config
+ assert network_config == m_read_initramfs_config.return_value
+ assert "Failed to parse secondary network configuration" in caplog.text
+
+ def test_ds_network_cfg_order(self, _m):
+ """Ensure that DS net config is preferred over initramfs config
+ but less than system config."""
+ config_sources = oracle.DataSourceOracle.network_config_sources
+ system_idx = config_sources.index(NetworkConfigSource.system_cfg)
+ ds_idx = config_sources.index(NetworkConfigSource.ds)
+ initramfs_idx = config_sources.index(NetworkConfigSource.initramfs)
+ assert system_idx < ds_idx < initramfs_idx
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_ovf.py b/tests/unittests/sources/test_ovf.py
new file mode 100644
index 00000000..c2c87f12
--- /dev/null
+++ b/tests/unittests/sources/test_ovf.py
@@ -0,0 +1,1237 @@
+# Copyright (C) 2016 Canonical Ltd.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import base64
+import os
+from collections import OrderedDict
+from textwrap import dedent
+
+from cloudinit import subp, util
+from cloudinit.helpers import Paths
+from cloudinit.safeyaml import YAMLError
+from cloudinit.sources import DataSourceOVF as dsovf
+from cloudinit.sources.helpers.vmware.imc.config_custom_script import (
+ CustomScriptNotFound,
+)
+from tests.unittests.helpers import CiTestCase, mock, wrap_and_call
+
+MPATH = "cloudinit.sources.DataSourceOVF."
+
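+# The transport functions return None when nothing is found; this alias keeps
+# the expected values in the assertions readable.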
+NOT_FOUND = None
+
+OVF_ENV_CONTENT = """<?xml version="1.0" encoding="UTF-8"?>
+<Environment xmlns="http://schemas.dmtf.org/ovf/environment/1"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xmlns:oe="http://schemas.dmtf.org/ovf/environment/1"
+ xsi:schemaLocation="http://schemas.dmtf.org/ovf/environment/1 ../dsp8027.xsd"
+ oe:id="WebTier">
+ <!-- Information about hypervisor platform -->
+ <oe:PlatformSection>
+ <Kind>ESX Server</Kind>
+ <Version>3.0.1</Version>
+ <Vendor>VMware, Inc.</Vendor>
+ <Locale>en_US</Locale>
+ </oe:PlatformSection>
+ <!--- Properties defined for this virtual machine -->
+ <PropertySection>
+{properties}
+ </PropertySection>
+</Environment>
+"""
+
+
+def fill_properties(props, template=OVF_ENV_CONTENT):
+ lines = []
+ prop_tmpl = '<Property oe:key="{key}" oe:value="{val}"/>'
+ for key, val in props.items():
+ lines.append(prop_tmpl.format(key=key, val=val))
+ indent = " "
+ properties = "".join([indent + line + "\n" for line in lines])
+ return template.format(properties=properties)
+
+
+class TestReadOvfEnv(CiTestCase):
+ def test_with_b64_userdata(self):
+ user_data = "#!/bin/sh\necho hello world\n"
+ user_data_b64 = base64.b64encode(user_data.encode()).decode()
+ props = {
+ "user-data": user_data_b64,
+ "password": "passw0rd",
+ "instance-id": "inst-001",
+ }
+ env = fill_properties(props)
+ md, ud, cfg = dsovf.read_ovf_environment(env)
+ self.assertEqual({"instance-id": "inst-001"}, md)
+ self.assertEqual(user_data.encode(), ud)
+ self.assertEqual({"password": "passw0rd"}, cfg)
+
+ def test_with_non_b64_userdata(self):
+ user_data = "my-user-data"
+ props = {"user-data": user_data, "instance-id": "inst-001"}
+ env = fill_properties(props)
+ md, ud, cfg = dsovf.read_ovf_environment(env)
+ self.assertEqual({"instance-id": "inst-001"}, md)
+ self.assertEqual(user_data.encode(), ud)
+ self.assertEqual({}, cfg)
+
+ def test_with_no_userdata(self):
+ props = {"password": "passw0rd", "instance-id": "inst-001"}
+ env = fill_properties(props)
+ md, ud, cfg = dsovf.read_ovf_environment(env)
+ self.assertEqual({"instance-id": "inst-001"}, md)
+ self.assertEqual({"password": "passw0rd"}, cfg)
+ self.assertIsNone(ud)
+
+ def test_with_b64_network_config_enable_read_network(self):
+ network_config = dedent(
+ """\
+ network:
+ version: 2
+ ethernets:
+ nics:
+ nameservers:
+ addresses:
+ - 127.0.0.53
+ search:
+ - eng.vmware.com
+ - vmware.com
+ match:
+ name: eth*
+ gateway4: 10.10.10.253
+ dhcp4: false
+ addresses:
+ - 10.10.10.1/24
+ """
+ )
+ network_config_b64 = base64.b64encode(network_config.encode()).decode()
+ props = {
+ "network-config": network_config_b64,
+ "password": "passw0rd",
+ "instance-id": "inst-001",
+ }
+ env = fill_properties(props)
+ md, ud, cfg = dsovf.read_ovf_environment(env, True)
+ self.assertEqual("inst-001", md["instance-id"])
+ self.assertEqual({"password": "passw0rd"}, cfg)
+ self.assertEqual(
+ {
+ "version": 2,
+ "ethernets": {
+ "nics": {
+ "nameservers": {
+ "addresses": ["127.0.0.53"],
+ "search": ["eng.vmware.com", "vmware.com"],
+ },
+ "match": {"name": "eth*"},
+ "gateway4": "10.10.10.253",
+ "dhcp4": False,
+ "addresses": ["10.10.10.1/24"],
+ }
+ },
+ },
+ md["network-config"],
+ )
+ self.assertIsNone(ud)
+
+ def test_with_non_b64_network_config_enable_read_network(self):
+ network_config = dedent(
+ """\
+ network:
+ version: 2
+ ethernets:
+ nics:
+ nameservers:
+ addresses:
+ - 127.0.0.53
+ search:
+ - eng.vmware.com
+ - vmware.com
+ match:
+ name: eth*
+ gateway4: 10.10.10.253
+ dhcp4: false
+ addresses:
+ - 10.10.10.1/24
+ """
+ )
+ props = {
+ "network-config": network_config,
+ "password": "passw0rd",
+ "instance-id": "inst-001",
+ }
+ env = fill_properties(props)
+ md, ud, cfg = dsovf.read_ovf_environment(env, True)
+ self.assertEqual({"instance-id": "inst-001"}, md)
+ self.assertEqual({"password": "passw0rd"}, cfg)
+ self.assertIsNone(ud)
+
+ def test_with_b64_network_config_disable_read_network(self):
+ network_config = dedent(
+ """\
+ network:
+ version: 2
+ ethernets:
+ nics:
+ nameservers:
+ addresses:
+ - 127.0.0.53
+ search:
+ - eng.vmware.com
+ - vmware.com
+ match:
+ name: eth*
+ gateway4: 10.10.10.253
+ dhcp4: false
+ addresses:
+ - 10.10.10.1/24
+ """
+ )
+ network_config_b64 = base64.b64encode(network_config.encode()).decode()
+ props = {
+ "network-config": network_config_b64,
+ "password": "passw0rd",
+ "instance-id": "inst-001",
+ }
+ env = fill_properties(props)
+ md, ud, cfg = dsovf.read_ovf_environment(env)
+ self.assertEqual({"instance-id": "inst-001"}, md)
+ self.assertEqual({"password": "passw0rd"}, cfg)
+ self.assertIsNone(ud)
+
+
+class TestMarkerFiles(CiTestCase):
+ def setUp(self):
+ super(TestMarkerFiles, self).setUp()
+ self.tdir = self.tmp_dir()
+
+ def test_false_when_markerid_none(self):
+ """Return False when markerid provided is None."""
+ self.assertFalse(
+ dsovf.check_marker_exists(markerid=None, marker_dir=self.tdir)
+ )
+
+ def test_markerid_file_exist(self):
+ """Return False when markerid file path does not exist,
+ True otherwise."""
+ self.assertFalse(dsovf.check_marker_exists("123", self.tdir))
+
+ marker_file = self.tmp_path(".markerfile-123.txt", self.tdir)
+ util.write_file(marker_file, "")
+ self.assertTrue(dsovf.check_marker_exists("123", self.tdir))
+
+ def test_marker_file_setup(self):
+ """Test creation of marker files."""
+ markerfilepath = self.tmp_path(".markerfile-hi.txt", self.tdir)
+ self.assertFalse(os.path.exists(markerfilepath))
+ dsovf.setup_marker_files(markerid="hi", marker_dir=self.tdir)
+ self.assertTrue(os.path.exists(markerfilepath))
+
+
+class TestDatasourceOVF(CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestDatasourceOVF, self).setUp()
+ self.datasource = dsovf.DataSourceOVF
+ self.tdir = self.tmp_dir()
+
+ def test_get_data_false_on_none_dmi_data(self):
+ """When dmi for system-product-name is None, get_data returns False."""
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(sys_cfg={}, distro={}, paths=paths)
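+        # wrap_and_call (from tests.unittests.helpers) patches each named
+        # attribute under the given module prefix to return the supplied value
+        # for the duration of the ds.get_data call.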
+ retcode = wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": None,
+ "transport_iso9660": NOT_FOUND,
+ "transport_vmware_guestinfo": NOT_FOUND,
+ },
+ ds.get_data,
+ )
+ self.assertFalse(retcode, "Expected False return from ds.get_data")
+ self.assertIn(
+ "DEBUG: No system-product-name found", self.logs.getvalue()
+ )
+
+ def test_get_data_vmware_customization_disabled(self):
+ """When vmware customization is disabled via sys_cfg and
+ allow_raw_data is disabled via ds_cfg, log a message.
+ """
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={
+ "disable_vmware_customization": True,
+ "datasource": {"OVF": {"allow_raw_data": False}},
+ },
+ distro={},
+ paths=paths,
+ )
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [MISC]
+ MARKER-ID = 12345345
+ """
+ )
+ util.write_file(conf_file, conf_content)
+ retcode = wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "transport_iso9660": NOT_FOUND,
+ "transport_vmware_guestinfo": NOT_FOUND,
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ },
+ ds.get_data,
+ )
+ self.assertFalse(retcode, "Expected False return from ds.get_data")
+ self.assertIn(
+ "DEBUG: Customization for VMware platform is disabled.",
+ self.logs.getvalue(),
+ )
+
+ def test_get_data_vmware_customization_sys_cfg_disabled(self):
+ """When vmware customization is disabled via sys_cfg and
+ no meta data is found, log a message.
+ """
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={
+ "disable_vmware_customization": True,
+ "datasource": {"OVF": {"allow_raw_data": True}},
+ },
+ distro={},
+ paths=paths,
+ )
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [MISC]
+ MARKER-ID = 12345345
+ """
+ )
+ util.write_file(conf_file, conf_content)
+ retcode = wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "transport_iso9660": NOT_FOUND,
+ "transport_vmware_guestinfo": NOT_FOUND,
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ },
+ ds.get_data,
+ )
+ self.assertFalse(retcode, "Expected False return from ds.get_data")
+ self.assertIn(
+ "DEBUG: Customization using VMware config is disabled.",
+ self.logs.getvalue(),
+ )
+
+ def test_get_data_allow_raw_data_disabled(self):
+ """When allow_raw_data is disabled via ds_cfg and
+ meta data is found, log a message.
+ """
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={
+ "disable_vmware_customization": False,
+ "datasource": {"OVF": {"allow_raw_data": False}},
+ },
+ distro={},
+ paths=paths,
+ )
+
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CLOUDINIT]
+ METADATA = test-meta
+ """
+ )
+ util.write_file(conf_file, conf_content)
+ # Prepare the meta data file
+ metadata_file = self.tmp_path("test-meta", self.tdir)
+ util.write_file(metadata_file, "This is meta data")
+ retcode = wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "transport_iso9660": NOT_FOUND,
+ "transport_vmware_guestinfo": NOT_FOUND,
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "collect_imc_file_paths": [self.tdir + "/test-meta", "", ""],
+ },
+ ds.get_data,
+ )
+ self.assertFalse(retcode, "Expected False return from ds.get_data")
+ self.assertIn(
+ "DEBUG: Customization using raw data is disabled.",
+ self.logs.getvalue(),
+ )
+
+ def test_get_data_vmware_customization_enabled(self):
+ """When cloud-init workflow for vmware is enabled via sys_cfg log a
+ message.
+ """
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": False},
+ distro={},
+ paths=paths,
+ )
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CUSTOM-SCRIPT]
+ SCRIPT-NAME = test-script
+ [MISC]
+ MARKER-ID = 12345345
+ """
+ )
+ util.write_file(conf_file, conf_content)
+ with mock.patch(MPATH + "get_tools_config", return_value="true"):
+ with self.assertRaises(CustomScriptNotFound) as context:
+ wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "get_nics_to_enable": "",
+ },
+ ds.get_data,
+ )
+ customscript = self.tmp_path("test-script", self.tdir)
+ self.assertIn(
+ "Script %s not found!!" % customscript, str(context.exception)
+ )
+
+ def test_get_data_cust_script_disabled(self):
+ """If custom script is disabled by VMware tools configuration,
+ raise a RuntimeError.
+ """
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": False},
+ distro={},
+ paths=paths,
+ )
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CUSTOM-SCRIPT]
+ SCRIPT-NAME = test-script
+ [MISC]
+ MARKER-ID = 12345346
+ """
+ )
+ util.write_file(conf_file, conf_content)
+        # Prepare the custom script
+ customscript = self.tmp_path("test-script", self.tdir)
+ util.write_file(customscript, "This is the post cust script")
+
+ with mock.patch(MPATH + "get_tools_config", return_value="invalid"):
+ with mock.patch(
+ MPATH + "set_customization_status", return_value=("msg", b"")
+ ):
+ with self.assertRaises(RuntimeError) as context:
+ wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "get_nics_to_enable": "",
+ },
+ ds.get_data,
+ )
+ self.assertIn(
+ "Custom script is disabled by VM Administrator",
+ str(context.exception),
+ )
+
+ def test_get_data_cust_script_enabled(self):
+ """If custom script is enabled by VMware tools configuration,
+ execute the script.
+ """
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": False},
+ distro={},
+ paths=paths,
+ )
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CUSTOM-SCRIPT]
+ SCRIPT-NAME = test-script
+ [MISC]
+ MARKER-ID = 12345346
+ """
+ )
+ util.write_file(conf_file, conf_content)
+
+        # Mock that the custom script is enabled by returning "true" when
+        # calling get_tools_config
+ with mock.patch(MPATH + "get_tools_config", return_value="true"):
+ with mock.patch(
+ MPATH + "set_customization_status", return_value=("msg", b"")
+ ):
+ with self.assertRaises(CustomScriptNotFound) as context:
+ wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "get_nics_to_enable": "",
+ },
+ ds.get_data,
+ )
+ # Verify custom script is trying to be executed
+ customscript = self.tmp_path("test-script", self.tdir)
+ self.assertIn(
+ "Script %s not found!!" % customscript, str(context.exception)
+ )
+
+ def test_get_data_force_run_post_script_is_yes(self):
+ """If DEFAULT-RUN-POST-CUST-SCRIPT is yes, custom script could run if
+ enable-custom-scripts is not defined in VM Tools configuration
+ """
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": False},
+ distro={},
+ paths=paths,
+ )
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+        # Set DEFAULT-RUN-POST-CUST-SCRIPT = yes so that the default value of
+        # enable-custom-scripts is true
+ conf_content = dedent(
+ """\
+ [CUSTOM-SCRIPT]
+ SCRIPT-NAME = test-script
+ [MISC]
+ MARKER-ID = 12345346
+ DEFAULT-RUN-POST-CUST-SCRIPT = yes
+ """
+ )
+ util.write_file(conf_file, conf_content)
+
+ # Mock get_tools_config(section, key, defaultVal) to return
+ # defaultVal
+ def my_get_tools_config(*args, **kwargs):
+ return args[2]
+
+ with mock.patch(
+ MPATH + "get_tools_config", side_effect=my_get_tools_config
+ ):
+ with mock.patch(
+ MPATH + "set_customization_status", return_value=("msg", b"")
+ ):
+ with self.assertRaises(CustomScriptNotFound) as context:
+ wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "get_nics_to_enable": "",
+ },
+ ds.get_data,
+ )
+                    # Verify the custom script is still attempted even though
+                    # enable-custom-scripts is not set in the VMware Tools
+                    # configuration
+ customscript = self.tmp_path("test-script", self.tdir)
+ self.assertIn(
+ "Script %s not found!!" % customscript, str(context.exception)
+ )
+
+ def test_get_data_non_vmware_seed_platform_info(self):
+ """Platform info properly reports when on non-vmware platforms."""
+ paths = Paths({"cloud_dir": self.tdir, "run_dir": self.tdir})
+ # Write ovf-env.xml seed file
+ seed_dir = self.tmp_path("seed", dir=self.tdir)
+ ovf_env = self.tmp_path("ovf-env.xml", dir=seed_dir)
+ util.write_file(ovf_env, OVF_ENV_CONTENT)
+ ds = self.datasource(sys_cfg={}, distro={}, paths=paths)
+
+ self.assertEqual("ovf", ds.cloud_name)
+ self.assertEqual("ovf", ds.platform_type)
+ with mock.patch(MPATH + "dmi.read_dmi_data", return_value="!VMware"):
+ with mock.patch(MPATH + "transport_vmware_guestinfo") as m_guestd:
+ with mock.patch(MPATH + "transport_iso9660") as m_iso9660:
+ m_iso9660.return_value = NOT_FOUND
+ m_guestd.return_value = NOT_FOUND
+ self.assertTrue(ds.get_data())
+ self.assertEqual(
+ "ovf (%s/seed/ovf-env.xml)" % self.tdir, ds.subplatform
+ )
+
+ def test_get_data_vmware_seed_platform_info(self):
+ """Platform info properly reports when on VMware platform."""
+ paths = Paths({"cloud_dir": self.tdir, "run_dir": self.tdir})
+ # Write ovf-env.xml seed file
+ seed_dir = self.tmp_path("seed", dir=self.tdir)
+ ovf_env = self.tmp_path("ovf-env.xml", dir=seed_dir)
+ util.write_file(ovf_env, OVF_ENV_CONTENT)
+ ds = self.datasource(sys_cfg={}, distro={}, paths=paths)
+
+ self.assertEqual("ovf", ds.cloud_name)
+ self.assertEqual("ovf", ds.platform_type)
+ with mock.patch(MPATH + "dmi.read_dmi_data", return_value="VMWare"):
+ with mock.patch(MPATH + "transport_vmware_guestinfo") as m_guestd:
+ with mock.patch(MPATH + "transport_iso9660") as m_iso9660:
+ m_iso9660.return_value = NOT_FOUND
+ m_guestd.return_value = NOT_FOUND
+ self.assertTrue(ds.get_data())
+ self.assertEqual(
+ "vmware (%s/seed/ovf-env.xml)" % self.tdir,
+ ds.subplatform,
+ )
+
+ @mock.patch("cloudinit.subp.subp")
+ @mock.patch("cloudinit.sources.DataSource.persist_instance_data")
+ def test_get_data_vmware_guestinfo_with_network_config(
+ self, m_persist, m_subp
+ ):
+        self._test_get_data_with_network_config(guestinfo=True, iso=False)
+
+ @mock.patch("cloudinit.subp.subp")
+ @mock.patch("cloudinit.sources.DataSource.persist_instance_data")
+ def test_get_data_iso9660_with_network_config(self, m_persist, m_subp):
+        self._test_get_data_with_network_config(guestinfo=False, iso=True)
+
+ def _test_get_data_with_network_config(self, guestinfo, iso):
+ network_config = dedent(
+ """\
+ network:
+ version: 2
+ ethernets:
+ nics:
+ nameservers:
+ addresses:
+ - 127.0.0.53
+ search:
+ - vmware.com
+ match:
+ name: eth*
+ gateway4: 10.10.10.253
+ dhcp4: false
+ addresses:
+ - 10.10.10.1/24
+ """
+ )
+ network_config_b64 = base64.b64encode(network_config.encode()).decode()
+ props = {
+ "network-config": network_config_b64,
+ "password": "passw0rd",
+ "instance-id": "inst-001",
+ }
+ env = fill_properties(props)
+ paths = Paths({"cloud_dir": self.tdir, "run_dir": self.tdir})
+ ds = self.datasource(sys_cfg={}, distro={}, paths=paths)
+ with mock.patch(
+ MPATH + "transport_vmware_guestinfo",
+ return_value=env if guestinfo else NOT_FOUND,
+ ):
+ with mock.patch(
+ MPATH + "transport_iso9660",
+ return_value=env if iso else NOT_FOUND,
+ ):
+ self.assertTrue(ds.get_data())
+ self.assertEqual("inst-001", ds.metadata["instance-id"])
+ self.assertEqual(
+ {
+ "version": 2,
+ "ethernets": {
+ "nics": {
+ "nameservers": {
+ "addresses": ["127.0.0.53"],
+ "search": ["vmware.com"],
+ },
+ "match": {"name": "eth*"},
+ "gateway4": "10.10.10.253",
+ "dhcp4": False,
+ "addresses": ["10.10.10.1/24"],
+ }
+ },
+ },
+ ds.network_config,
+ )
+
+ def test_get_data_cloudinit_metadata_json(self):
+ """Test metadata can be loaded to cloud-init metadata and network.
+ The metadata format is json.
+ """
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": True},
+ distro={},
+ paths=paths,
+ )
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CLOUDINIT]
+ METADATA = test-meta
+ """
+ )
+ util.write_file(conf_file, conf_content)
+ # Prepare the meta data file
+ metadata_file = self.tmp_path("test-meta", self.tdir)
+ metadata_content = dedent(
+ """\
+ {
+ "instance-id": "cloud-vm",
+ "local-hostname": "my-host.domain.com",
+ "network": {
+ "version": 2,
+ "ethernets": {
+ "eths": {
+ "match": {
+ "name": "ens*"
+ },
+ "dhcp4": true
+ }
+ }
+ }
+ }
+ """
+ )
+ util.write_file(metadata_file, metadata_content)
+
+ with mock.patch(
+ MPATH + "set_customization_status", return_value=("msg", b"")
+ ):
+ result = wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "collect_imc_file_paths": [
+ self.tdir + "/test-meta",
+ "",
+ "",
+ ],
+ "get_nics_to_enable": "",
+ },
+ ds._get_data,
+ )
+
+ self.assertTrue(result)
+ self.assertEqual("cloud-vm", ds.metadata["instance-id"])
+ self.assertEqual("my-host.domain.com", ds.metadata["local-hostname"])
+ self.assertEqual(2, ds.network_config["version"])
+ self.assertTrue(ds.network_config["ethernets"]["eths"]["dhcp4"])
+
+ def test_get_data_cloudinit_metadata_yaml(self):
+ """Test metadata can be loaded to cloud-init metadata and network.
+ The metadata format is yaml.
+ """
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": True},
+ distro={},
+ paths=paths,
+ )
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CLOUDINIT]
+ METADATA = test-meta
+ """
+ )
+ util.write_file(conf_file, conf_content)
+ # Prepare the meta data file
+ metadata_file = self.tmp_path("test-meta", self.tdir)
+ metadata_content = dedent(
+ """\
+ instance-id: cloud-vm
+ local-hostname: my-host.domain.com
+ network:
+ version: 2
+ ethernets:
+ nics:
+ match:
+ name: ens*
+ dhcp4: yes
+ """
+ )
+ util.write_file(metadata_file, metadata_content)
+
+ with mock.patch(
+ MPATH + "set_customization_status", return_value=("msg", b"")
+ ):
+ result = wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "collect_imc_file_paths": [
+ self.tdir + "/test-meta",
+ "",
+ "",
+ ],
+ "get_nics_to_enable": "",
+ },
+ ds._get_data,
+ )
+
+ self.assertTrue(result)
+ self.assertEqual("cloud-vm", ds.metadata["instance-id"])
+ self.assertEqual("my-host.domain.com", ds.metadata["local-hostname"])
+ self.assertEqual(2, ds.network_config["version"])
+ self.assertTrue(ds.network_config["ethernets"]["nics"]["dhcp4"])
+
+ def test_get_data_cloudinit_metadata_not_valid(self):
+ """Test metadata is not JSON or YAML format."""
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": True},
+ distro={},
+ paths=paths,
+ )
+
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CLOUDINIT]
+ METADATA = test-meta
+ """
+ )
+ util.write_file(conf_file, conf_content)
+
+ # Prepare the meta data file
+ metadata_file = self.tmp_path("test-meta", self.tdir)
+ metadata_content = "[This is not json or yaml format]a=b"
+ util.write_file(metadata_file, metadata_content)
+
+ with mock.patch(
+ MPATH + "set_customization_status", return_value=("msg", b"")
+ ):
+ with self.assertRaises(YAMLError) as context:
+ wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "collect_imc_file_paths": [
+ self.tdir + "/test-meta",
+ "",
+ "",
+ ],
+ "get_nics_to_enable": "",
+ },
+ ds.get_data,
+ )
+
+ self.assertIn(
+ "expected '<document start>', but found '<scalar>'",
+ str(context.exception),
+ )
+
+ def test_get_data_cloudinit_metadata_not_found(self):
+ """Test metadata file can't be found."""
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": True},
+ distro={},
+ paths=paths,
+ )
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CLOUDINIT]
+ METADATA = test-meta
+ """
+ )
+ util.write_file(conf_file, conf_content)
+ # Don't prepare the meta data file
+
+ with mock.patch(
+ MPATH + "set_customization_status", return_value=("msg", b"")
+ ):
+ with self.assertRaises(FileNotFoundError) as context:
+ wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "get_nics_to_enable": "",
+ },
+ ds.get_data,
+ )
+
+ self.assertIn("is not found", str(context.exception))
+
+ def test_get_data_cloudinit_userdata(self):
+ """Test user data can be loaded to cloud-init user data."""
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": False},
+ distro={},
+ paths=paths,
+ )
+
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CLOUDINIT]
+ METADATA = test-meta
+ USERDATA = test-user
+ """
+ )
+ util.write_file(conf_file, conf_content)
+
+ # Prepare the meta data file
+ metadata_file = self.tmp_path("test-meta", self.tdir)
+ metadata_content = dedent(
+ """\
+ instance-id: cloud-vm
+ local-hostname: my-host.domain.com
+ network:
+ version: 2
+ ethernets:
+ nics:
+ match:
+ name: ens*
+ dhcp4: yes
+ """
+ )
+ util.write_file(metadata_file, metadata_content)
+
+ # Prepare the user data file
+ userdata_file = self.tmp_path("test-user", self.tdir)
+ userdata_content = "This is the user data"
+ util.write_file(userdata_file, userdata_content)
+
+ with mock.patch(
+ MPATH + "set_customization_status", return_value=("msg", b"")
+ ):
+ result = wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "collect_imc_file_paths": [
+ self.tdir + "/test-meta",
+ self.tdir + "/test-user",
+ "",
+ ],
+ "get_nics_to_enable": "",
+ },
+ ds._get_data,
+ )
+
+ self.assertTrue(result)
+ self.assertEqual("cloud-vm", ds.metadata["instance-id"])
+ self.assertEqual(userdata_content, ds.userdata_raw)
+
+ def test_get_data_cloudinit_userdata_not_found(self):
+ """Test userdata file can't be found."""
+ paths = Paths({"cloud_dir": self.tdir})
+ ds = self.datasource(
+ sys_cfg={"disable_vmware_customization": True},
+ distro={},
+ paths=paths,
+ )
+
+ # Prepare the conf file
+ conf_file = self.tmp_path("test-cust", self.tdir)
+ conf_content = dedent(
+ """\
+ [CLOUDINIT]
+ METADATA = test-meta
+ USERDATA = test-user
+ """
+ )
+ util.write_file(conf_file, conf_content)
+
+ # Prepare the meta data file
+ metadata_file = self.tmp_path("test-meta", self.tdir)
+ metadata_content = dedent(
+ """\
+ instance-id: cloud-vm
+ local-hostname: my-host.domain.com
+ network:
+ version: 2
+ ethernets:
+ nics:
+ match:
+ name: ens*
+ dhcp4: yes
+ """
+ )
+ util.write_file(metadata_file, metadata_content)
+
+ # Don't prepare the user data file
+
+ with mock.patch(
+ MPATH + "set_customization_status", return_value=("msg", b"")
+ ):
+ with self.assertRaises(FileNotFoundError) as context:
+ wrap_and_call(
+ "cloudinit.sources.DataSourceOVF",
+ {
+ "dmi.read_dmi_data": "vmware",
+ "util.del_dir": True,
+ "search_file": self.tdir,
+ "wait_for_imc_cfg_file": conf_file,
+ "get_nics_to_enable": "",
+ },
+ ds.get_data,
+ )
+
+ self.assertIn("is not found", str(context.exception))
+
+
+class TestTransportIso9660(CiTestCase):
+ def setUp(self):
+ super(TestTransportIso9660, self).setUp()
+ self.add_patch("cloudinit.util.find_devs_with", "m_find_devs_with")
+ self.add_patch("cloudinit.util.mounts", "m_mounts")
+ self.add_patch("cloudinit.util.mount_cb", "m_mount_cb")
+ self.add_patch(
+ "cloudinit.sources.DataSourceOVF.get_ovf_env", "m_get_ovf_env"
+ )
+ self.m_get_ovf_env.return_value = ("myfile", "mycontent")
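+        # get_ovf_env returns a (filename, contents) tuple; transport_iso9660
+        # returns only the contents, hence the "mycontent" assertions below.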
+
+ def test_find_already_mounted(self):
+ """Check we call get_ovf_env from on matching mounted devices"""
+ mounts = {
+ "/dev/sr9": {
+ "fstype": "iso9660",
+ "mountpoint": "wark/media/sr9",
+ "opts": "ro",
+ }
+ }
+ self.m_mounts.return_value = mounts
+
+ self.assertEqual("mycontent", dsovf.transport_iso9660())
+
+ def test_find_already_mounted_skips_non_iso9660(self):
+ """Check we call get_ovf_env ignoring non iso9660"""
+ mounts = {
+ "/dev/xvdb": {
+ "fstype": "vfat",
+ "mountpoint": "wark/foobar",
+ "opts": "defaults,noatime",
+ },
+ "/dev/xvdc": {
+ "fstype": "iso9660",
+ "mountpoint": "wark/media/sr9",
+ "opts": "ro",
+ },
+ }
+        # We use an OrderedDict here to ensure we check xvdb before xvdc,
+        # as we're not mocking the regex matching; if the iso9660 entry still
+        # shows up in the result, we can be reasonably sure the non-matching
+        # xvdb entry was skipped.
+ self.m_mounts.return_value = OrderedDict(
+ sorted(mounts.items(), key=lambda t: t[0])
+ )
+
+ self.assertEqual("mycontent", dsovf.transport_iso9660())
+
+ def test_find_already_mounted_matches_kname(self):
+ """Check we dont regex match on basename of the device"""
+ mounts = {
+ "/dev/foo/bar/xvdc": {
+ "fstype": "iso9660",
+ "mountpoint": "wark/media/sr9",
+ "opts": "ro",
+ }
+ }
+        # The device's basename would match, but its full path does not, so
+        # nothing is found.
+ self.m_mounts.return_value = mounts
+
+ self.assertEqual(NOT_FOUND, dsovf.transport_iso9660())
+
+ def test_mount_cb_called_on_blkdevs_with_iso9660(self):
+ """Check we call mount_cb on blockdevs with iso9660 only"""
+ self.m_mounts.return_value = {}
+ self.m_find_devs_with.return_value = ["/dev/sr0"]
+ self.m_mount_cb.return_value = ("myfile", "mycontent")
+
+ self.assertEqual("mycontent", dsovf.transport_iso9660())
+ self.m_mount_cb.assert_called_with(
+ "/dev/sr0", dsovf.get_ovf_env, mtype="iso9660"
+ )
+
+ def test_mount_cb_called_on_blkdevs_with_iso9660_check_regex(self):
+ """Check we call mount_cb on blockdevs with iso9660 and match regex"""
+ self.m_mounts.return_value = {}
+ self.m_find_devs_with.return_value = [
+ "/dev/abc",
+ "/dev/my-cdrom",
+ "/dev/sr0",
+ ]
+ self.m_mount_cb.return_value = ("myfile", "mycontent")
+
+ self.assertEqual("mycontent", dsovf.transport_iso9660())
+ self.m_mount_cb.assert_called_with(
+ "/dev/sr0", dsovf.get_ovf_env, mtype="iso9660"
+ )
+
+ def test_mount_cb_not_called_no_matches(self):
+ """Check we don't call mount_cb if nothing matches"""
+ self.m_mounts.return_value = {}
+ self.m_find_devs_with.return_value = ["/dev/vg/myovf"]
+
+ self.assertEqual(NOT_FOUND, dsovf.transport_iso9660())
+ self.assertEqual(0, self.m_mount_cb.call_count)
+
+ def test_mount_cb_called_require_iso_false(self):
+ """Check we call mount_cb on blockdevs with require_iso=False"""
+ self.m_mounts.return_value = {}
+ self.m_find_devs_with.return_value = ["/dev/xvdz"]
+ self.m_mount_cb.return_value = ("myfile", "mycontent")
+
+ self.assertEqual(
+ "mycontent", dsovf.transport_iso9660(require_iso=False)
+ )
+
+ self.m_mount_cb.assert_called_with(
+ "/dev/xvdz", dsovf.get_ovf_env, mtype=None
+ )
+
+ def test_maybe_cdrom_device_none(self):
+ """Test maybe_cdrom_device returns False for none/empty input"""
+ self.assertFalse(dsovf.maybe_cdrom_device(None))
+ self.assertFalse(dsovf.maybe_cdrom_device(""))
+
+ def test_maybe_cdrom_device_non_string_exception(self):
+ """Test maybe_cdrom_device raises ValueError on non-string types"""
+ with self.assertRaises(ValueError):
+ dsovf.maybe_cdrom_device({"a": "eleven"})
+
+ def test_maybe_cdrom_device_false_on_multi_dir_paths(self):
+ """Test maybe_cdrom_device is false on /dev[/.*]/* paths"""
+ self.assertFalse(dsovf.maybe_cdrom_device("/dev/foo/sr0"))
+ self.assertFalse(dsovf.maybe_cdrom_device("foo/sr0"))
+ self.assertFalse(dsovf.maybe_cdrom_device("../foo/sr0"))
+ self.assertFalse(dsovf.maybe_cdrom_device("../foo/sr0"))
+
+ def test_maybe_cdrom_device_true_on_hd_partitions(self):
+ """Test maybe_cdrom_device is false on /dev/hd[a-z][0-9]+ paths"""
+ self.assertTrue(dsovf.maybe_cdrom_device("/dev/hda1"))
+ self.assertTrue(dsovf.maybe_cdrom_device("hdz9"))
+
+ def test_maybe_cdrom_device_true_on_valid_relative_paths(self):
+ """Test maybe_cdrom_device normalizes paths"""
+ self.assertTrue(dsovf.maybe_cdrom_device("/dev/wark/../sr9"))
+ self.assertTrue(dsovf.maybe_cdrom_device("///sr0"))
+ self.assertTrue(dsovf.maybe_cdrom_device("/sr0"))
+ self.assertTrue(dsovf.maybe_cdrom_device("//dev//hda"))
+
+ def test_maybe_cdrom_device_true_on_xvd_partitions(self):
+ """Test maybe_cdrom_device returns true on xvd*"""
+ self.assertTrue(dsovf.maybe_cdrom_device("/dev/xvda"))
+ self.assertTrue(dsovf.maybe_cdrom_device("/dev/xvda1"))
+ self.assertTrue(dsovf.maybe_cdrom_device("xvdza1"))
+
+
+@mock.patch(MPATH + "subp.which")
+@mock.patch(MPATH + "subp.subp")
+class TestTransportVmwareGuestinfo(CiTestCase):
+ """Test the com.vmware.guestInfo transport implemented in
+ transport_vmware_guestinfo."""
+
+ rpctool = "vmware-rpctool"
+ with_logs = True
+ rpctool_path = "/not/important/vmware-rpctool"
+
+ def test_without_vmware_rpctool_returns_notfound(self, m_subp, m_which):
+ m_which.return_value = None
+ self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo())
+ self.assertEqual(
+ 0,
+ m_subp.call_count,
+ "subp should not be called if no rpctool in path.",
+ )
+
+ def test_notfound_on_exit_code_1(self, m_subp, m_which):
+ """If vmware-rpctool exits 1, then must return not found."""
+ m_which.return_value = self.rpctool_path
+ m_subp.side_effect = subp.ProcessExecutionError(
+ stdout="", stderr="No value found", exit_code=1, cmd=["unused"]
+ )
+ self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo())
+ self.assertEqual(1, m_subp.call_count)
+ self.assertNotIn(
+ "WARNING",
+ self.logs.getvalue(),
+ "exit code of 1 by rpctool should not cause warning.",
+ )
+
+ def test_notfound_if_no_content_but_exit_zero(self, m_subp, m_which):
+ """If vmware-rpctool exited 0 with no stdout is normal not-found.
+
+ This isn't actually a case I've seen. normally on "not found",
+ rpctool would exit 1 with 'No value found' on stderr. But cover
+ the case where it exited 0 and just wrote nothing to stdout.
+ """
+ m_which.return_value = self.rpctool_path
+ m_subp.return_value = ("", "")
+ self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo())
+ self.assertEqual(1, m_subp.call_count)
+
+ def test_notfound_and_warns_on_unexpected_exit_code(self, m_subp, m_which):
+ """If vmware-rpctool exits non zero or 1, warnings should be logged."""
+ m_which.return_value = self.rpctool_path
+ m_subp.side_effect = subp.ProcessExecutionError(
+ stdout=None, stderr="No value found", exit_code=2, cmd=["unused"]
+ )
+ self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo())
+ self.assertEqual(1, m_subp.call_count)
+ self.assertIn(
+ "WARNING",
+ self.logs.getvalue(),
+ "exit code of 2 by rpctool should log WARNING.",
+ )
+
+ def test_found_when_guestinfo_present(self, m_subp, m_which):
+ """When there is a ovf info, transport should return it."""
+ m_which.return_value = self.rpctool_path
+ content = fill_properties({})
+ m_subp.return_value = (content, "")
+ self.assertEqual(content, dsovf.transport_vmware_guestinfo())
+ self.assertEqual(1, m_subp.call_count)
+
+
+#
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_rbx.py b/tests/unittests/sources/test_rbx.py
new file mode 100644
index 00000000..475bf498
--- /dev/null
+++ b/tests/unittests/sources/test_rbx.py
@@ -0,0 +1,241 @@
+import json
+
+from cloudinit import distros, helpers, subp
+from cloudinit.sources import DataSourceRbxCloud as ds
+from tests.unittests.helpers import CiTestCase, mock, populate_dir
+
+DS_PATH = "cloudinit.sources.DataSourceRbxCloud"
+
+CRYPTO_PASS = (
+ "$6$uktth46t$FvpDzFD2iL9YNZIG1Epz7957hJqbH0f"
+ "QKhnzcfBcUhEodGAWRqTy7tYG4nEW7SUOYBjxOSFIQW5"
+ "tToyGP41.s1"
+)
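+# CRYPTO_PASS is a crypt(3)-style SHA-512 ("$6$") password hash used as
+# arbitrary test data in CLOUD_METADATA below.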
+
+CLOUD_METADATA = {
+ "vm": {
+ "memory": 4,
+ "cpu": 2,
+ "name": "vm-image-builder",
+ "_id": "5beab44f680cffd11f0e60fc",
+ },
+ "additionalMetadata": {
+ "username": "guru",
+ "sshKeys": ["ssh-rsa ..."],
+ "password": {"sha512": CRYPTO_PASS},
+ },
+ "disk": [
+ {
+ "size": 10,
+ "type": "ssd",
+ "name": "vm-image-builder-os",
+ "_id": "5beab450680cffd11f0e60fe",
+ },
+ {
+ "size": 2,
+ "type": "ssd",
+ "name": "ubuntu-1804-bionic",
+ "_id": "5bef002c680cffd11f107590",
+ },
+ ],
+ "netadp": [
+ {
+ "ip": [{"address": "62.181.8.174"}],
+ "network": {
+ "dns": {"nameservers": ["8.8.8.8", "8.8.4.4"]},
+ "routing": [],
+ "gateway": "62.181.8.1",
+ "netmask": "255.255.248.0",
+ "name": "public",
+ "type": "public",
+ "_id": "5784e97be2627505227b578c",
+ },
+ "speed": 1000,
+ "type": "hv",
+ "macaddress": "00:15:5D:FF:0F:03",
+ "_id": "5beab450680cffd11f0e6102",
+ },
+ {
+ "ip": [{"address": "10.209.78.11"}],
+ "network": {
+ "dns": {"nameservers": ["9.9.9.9", "8.8.8.8"]},
+ "routing": [],
+ "gateway": "10.209.78.1",
+ "netmask": "255.255.255.0",
+ "name": "network-determined-bardeen",
+ "type": "private",
+ "_id": "5beaec64680cffd11f0e7c31",
+ },
+ "speed": 1000,
+ "type": "hv",
+ "macaddress": "00:15:5D:FF:0F:24",
+ "_id": "5bec18c6680cffd11f0f0d8b",
+ },
+ ],
+ "dvddrive": [{"iso": {}}],
+}
+
+
+class TestRbxDataSource(CiTestCase):
+ parsed_user = None
+ allowed_subp = ["bash"]
+
+ def _fetch_distro(self, kind):
+ cls = distros.fetch(kind)
+ paths = helpers.Paths({})
+ return cls(kind, {}, paths)
+
+ def setUp(self):
+ super(TestRbxDataSource, self).setUp()
+ self.tmp = self.tmp_dir()
+ self.paths = helpers.Paths(
+ {"cloud_dir": self.tmp, "run_dir": self.tmp}
+ )
+
+ # defaults for few tests
+ self.ds = ds.DataSourceRbxCloud
+ self.seed_dir = self.paths.seed_dir
+ self.sys_cfg = {"datasource": {"RbxCloud": {"dsmode": "local"}}}
+
+ def test_seed_read_user_data_callback_empty_file(self):
+ populate_user_metadata(self.seed_dir, "")
+ populate_cloud_metadata(self.seed_dir, {})
+ results = ds.read_user_data_callback(self.seed_dir)
+
+ self.assertIsNone(results)
+
+ def test_seed_read_user_data_callback_valid_disk(self):
+ populate_user_metadata(self.seed_dir, "")
+ populate_cloud_metadata(self.seed_dir, CLOUD_METADATA)
+ results = ds.read_user_data_callback(self.seed_dir)
+
+ self.assertNotEqual(results, None)
+ self.assertTrue("userdata" in results)
+ self.assertTrue("metadata" in results)
+ self.assertTrue("cfg" in results)
+
+ def test_seed_read_user_data_callback_userdata(self):
+ userdata = "#!/bin/sh\nexit 1"
+ populate_user_metadata(self.seed_dir, userdata)
+ populate_cloud_metadata(self.seed_dir, CLOUD_METADATA)
+
+ results = ds.read_user_data_callback(self.seed_dir)
+
+ self.assertNotEqual(results, None)
+ self.assertTrue("userdata" in results)
+ self.assertEqual(results["userdata"], userdata)
+
+ def test_generate_network_config(self):
+ expected = {
+ "version": 1,
+ "config": [
+ {
+ "subnets": [
+ {
+ "control": "auto",
+ "dns_nameservers": ["8.8.8.8", "8.8.4.4"],
+ "netmask": "255.255.248.0",
+ "address": "62.181.8.174",
+ "type": "static",
+ "gateway": "62.181.8.1",
+ }
+ ],
+ "type": "physical",
+ "name": "eth0",
+ "mac_address": "00:15:5d:ff:0f:03",
+ },
+ {
+ "subnets": [
+ {
+ "control": "auto",
+ "dns_nameservers": ["9.9.9.9", "8.8.8.8"],
+ "netmask": "255.255.255.0",
+ "address": "10.209.78.11",
+ "type": "static",
+ "gateway": "10.209.78.1",
+ }
+ ],
+ "type": "physical",
+ "name": "eth1",
+ "mac_address": "00:15:5d:ff:0f:24",
+ },
+ ],
+ }
+ self.assertEqual(
+ expected, ds.generate_network_config(CLOUD_METADATA["netadp"])
+ )
+
+ @mock.patch(DS_PATH + ".subp.subp")
+ def test_gratuitous_arp_run_standard_arping(self, m_subp):
+ """Test handle run arping & parameters."""
+ items = [
+ {"destination": "172.17.0.2", "source": "172.16.6.104"},
+ {
+ "destination": "172.17.0.2",
+ "source": "172.16.6.104",
+ },
+ ]
+ ds.gratuitous_arp(items, self._fetch_distro("ubuntu"))
+ self.assertEqual(
+ [
+ mock.call(
+ ["arping", "-c", "2", "-S", "172.16.6.104", "172.17.0.2"]
+ ),
+ mock.call(
+ ["arping", "-c", "2", "-S", "172.16.6.104", "172.17.0.2"]
+ ),
+ ],
+ m_subp.call_args_list,
+ )
+
+ @mock.patch(DS_PATH + ".subp.subp")
+ def test_handle_rhel_like_arping(self, m_subp):
+ """Test handle on RHEL-like distros."""
+ items = [
+ {
+ "source": "172.16.6.104",
+ "destination": "172.17.0.2",
+ }
+ ]
+ ds.gratuitous_arp(items, self._fetch_distro("fedora"))
+ self.assertEqual(
+ [
+ mock.call(
+ ["arping", "-c", "2", "-s", "172.16.6.104", "172.17.0.2"]
+ )
+ ],
+ m_subp.call_args_list,
+ )
+
+ @mock.patch(
+ DS_PATH + ".subp.subp", side_effect=subp.ProcessExecutionError()
+ )
+ def test_continue_on_arping_error(self, m_subp):
+ """Continue when command error"""
+ items = [
+ {"destination": "172.17.0.2", "source": "172.16.6.104"},
+ {
+ "destination": "172.17.0.2",
+ "source": "172.16.6.104",
+ },
+ ]
+ ds.gratuitous_arp(items, self._fetch_distro("ubuntu"))
+ self.assertEqual(
+ [
+ mock.call(
+ ["arping", "-c", "2", "-S", "172.16.6.104", "172.17.0.2"]
+ ),
+ mock.call(
+ ["arping", "-c", "2", "-S", "172.16.6.104", "172.17.0.2"]
+ ),
+ ],
+ m_subp.call_args_list,
+ )
+
+
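+# Helpers to build the on-disk seed that read_user_data_callback() reads:
+# cloud.json holds the metadata dict and user.data holds raw user-data.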
+def populate_cloud_metadata(path, data):
+ populate_dir(path, {"cloud.json": json.dumps(data)})
+
+
+def populate_user_metadata(path, data):
+ populate_dir(path, {"user.data": data})
diff --git a/tests/unittests/sources/test_scaleway.py b/tests/unittests/sources/test_scaleway.py
new file mode 100644
index 00000000..d7e8b969
--- /dev/null
+++ b/tests/unittests/sources/test_scaleway.py
@@ -0,0 +1,526 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import json
+
+import httpretty
+import requests
+
+from cloudinit import helpers, settings, sources
+from cloudinit.sources import DataSourceScaleway
+from tests.unittests.helpers import CiTestCase, HttprettyTestCase, mock
+
+
+class DataResponses(object):
+ """
+ Possible responses of the API endpoint
+ 169.254.42.42/user_data/cloud-init and
+ 169.254.42.42/vendor_data/cloud-init.
+ """
+
+ FAKE_USER_DATA = '#!/bin/bash\necho "user-data"'
+
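+ # Each handler below follows httpretty's callable-body convention: it is
+ # called with three positional arguments and returns a
+ # (status, headers, body) tuple.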
+ @staticmethod
+ def rate_limited(method, uri, headers):
+ return 429, headers, ""
+
+ @staticmethod
+ def api_error(method, uri, headers):
+ return 500, headers, ""
+
+ @classmethod
+ def get_ok(cls, method, uri, headers):
+ return 200, headers, cls.FAKE_USER_DATA
+
+ @staticmethod
+ def empty(method, uri, headers):
+ """
+ No user data for this server.
+ """
+ return 404, headers, ""
+
+
+class MetadataResponses(object):
+ """
+ Possible responses of the metadata API.
+ """
+
+ FAKE_METADATA = {
+ "id": "00000000-0000-0000-0000-000000000000",
+ "hostname": "scaleway.host",
+ "tags": [
+ "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD",
+ ],
+ "ssh_public_keys": [
+ {
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA",
+ "fingerprint": "2048 06:ae:... login (RSA)",
+ },
+ {
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC",
+ "fingerprint": "2048 06:ff:... login2 (RSA)",
+ },
+ ],
+ }
+
+ @classmethod
+ def get_ok(cls, method, uri, headers):
+ return 200, headers, json.dumps(cls.FAKE_METADATA)
+
+
+class TestOnScaleway(CiTestCase):
+ def setUp(self):
+ super(TestOnScaleway, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def install_mocks(self, fake_dmi, fake_file_exists, fake_cmdline):
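+ # Each fake_* argument is a (mock, enabled) tuple; the mock's return
+ # value is set so that only the enabled detection paths report Scaleway.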
+ mock, faked = fake_dmi
+ mock.return_value = "Scaleway" if faked else "Whatever"
+
+ mock, faked = fake_file_exists
+ mock.return_value = faked
+
+ mock, faked = fake_cmdline
+ mock.return_value = (
+ "initrd=initrd showopts scaleway nousb"
+ if faked
+ else "BOOT_IMAGE=/vmlinuz-3.11.0-26-generic"
+ )
+
+ @mock.patch("cloudinit.util.get_cmdline")
+ @mock.patch("os.path.exists")
+ @mock.patch("cloudinit.dmi.read_dmi_data")
+ def test_not_on_scaleway(
+ self, m_read_dmi_data, m_file_exists, m_get_cmdline
+ ):
+ self.install_mocks(
+ fake_dmi=(m_read_dmi_data, False),
+ fake_file_exists=(m_file_exists, False),
+ fake_cmdline=(m_get_cmdline, False),
+ )
+ self.assertFalse(DataSourceScaleway.on_scaleway())
+
+ # When not on Scaleway, get_data() returns False.
+ datasource = DataSourceScaleway.DataSourceScaleway(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ self.assertFalse(datasource.get_data())
+
+ @mock.patch("cloudinit.util.get_cmdline")
+ @mock.patch("os.path.exists")
+ @mock.patch("cloudinit.dmi.read_dmi_data")
+ def test_on_scaleway_dmi(
+ self, m_read_dmi_data, m_file_exists, m_get_cmdline
+ ):
+ """
+ dmidecode returns "Scaleway".
+ """
+ # dmidecode returns "Scaleway"
+ self.install_mocks(
+ fake_dmi=(m_read_dmi_data, True),
+ fake_file_exists=(m_file_exists, False),
+ fake_cmdline=(m_get_cmdline, False),
+ )
+ self.assertTrue(DataSourceScaleway.on_scaleway())
+
+ @mock.patch("cloudinit.util.get_cmdline")
+ @mock.patch("os.path.exists")
+ @mock.patch("cloudinit.dmi.read_dmi_data")
+ def test_on_scaleway_var_run_scaleway(
+ self, m_read_dmi_data, m_file_exists, m_get_cmdline
+ ):
+ """
+ /var/run/scaleway exists.
+ """
+ self.install_mocks(
+ fake_dmi=(m_read_dmi_data, False),
+ fake_file_exists=(m_file_exists, True),
+ fake_cmdline=(m_get_cmdline, False),
+ )
+ self.assertTrue(DataSourceScaleway.on_scaleway())
+
+ @mock.patch("cloudinit.util.get_cmdline")
+ @mock.patch("os.path.exists")
+ @mock.patch("cloudinit.dmi.read_dmi_data")
+ def test_on_scaleway_cmdline(
+ self, m_read_dmi_data, m_file_exists, m_get_cmdline
+ ):
+ """
+ "scaleway" in /proc/cmdline.
+ """
+ self.install_mocks(
+ fake_dmi=(m_read_dmi_data, False),
+ fake_file_exists=(m_file_exists, False),
+ fake_cmdline=(m_get_cmdline, True),
+ )
+ self.assertTrue(DataSourceScaleway.on_scaleway())
+
+
+def get_source_address_adapter(*args, **kwargs):
+ """
+ The Scaleway user/vendor data API must be queried from a privileged
+ source port.
+
+ If the unittests are run as non-root, the user doesn't have permission
+ to bind to ports below 1024.
+
+ This function drops the privileged source-address binding, since the
+ HTTP call is mocked by httpretty anyway.
+ """
+ kwargs.pop("source_address")
+ return requests.adapters.HTTPAdapter(*args, **kwargs)
+
+
+class TestDataSourceScaleway(HttprettyTestCase):
+ def setUp(self):
+ tmp = self.tmp_dir()
+ self.datasource = DataSourceScaleway.DataSourceScaleway(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": tmp})
+ )
+ super(TestDataSourceScaleway, self).setUp()
+
+ self.metadata_url = DataSourceScaleway.BUILTIN_DS_CONFIG[
+ "metadata_url"
+ ]
+ self.userdata_url = DataSourceScaleway.BUILTIN_DS_CONFIG[
+ "userdata_url"
+ ]
+ self.vendordata_url = DataSourceScaleway.BUILTIN_DS_CONFIG[
+ "vendordata_url"
+ ]
+
+ self.add_patch(
+ "cloudinit.sources.DataSourceScaleway.on_scaleway",
+ "_m_on_scaleway",
+ return_value=True,
+ )
+ self.add_patch(
+ "cloudinit.sources.DataSourceScaleway.net.find_fallback_nic",
+ "_m_find_fallback_nic",
+ return_value="scalewaynic0",
+ )
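+ # on_scaleway and the fallback NIC lookup are patched so get_data() can
+ # run entirely against the httpretty-mocked endpoints.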
+
+ @mock.patch("cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4")
+ @mock.patch(
+ "cloudinit.sources.DataSourceScaleway.SourceAddressAdapter",
+ get_source_address_adapter,
+ )
+ @mock.patch("cloudinit.util.get_cmdline")
+ @mock.patch("time.sleep", return_value=None)
+ def test_metadata_ok(self, sleep, m_get_cmdline, dhcpv4):
+ """
+ get_data() returns metadata, user data and vendor data.
+ """
+ m_get_cmdline.return_value = "scaleway"
+
+ # Make user data API return a valid response
+ httpretty.register_uri(
+ httpretty.GET, self.metadata_url, body=MetadataResponses.get_ok
+ )
+ httpretty.register_uri(
+ httpretty.GET, self.userdata_url, body=DataResponses.get_ok
+ )
+ httpretty.register_uri(
+ httpretty.GET, self.vendordata_url, body=DataResponses.get_ok
+ )
+ self.datasource.get_data()
+
+ self.assertEqual(
+ self.datasource.get_instance_id(),
+ MetadataResponses.FAKE_METADATA["id"],
+ )
+ self.assertEqual(
+ sorted(self.datasource.get_public_ssh_keys()),
+ sorted(
+ [
+ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC",
+ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD",
+ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA",
+ ]
+ ),
+ )
+ self.assertEqual(
+ self.datasource.get_hostname(),
+ MetadataResponses.FAKE_METADATA["hostname"],
+ )
+ self.assertEqual(
+ self.datasource.get_userdata_raw(), DataResponses.FAKE_USER_DATA
+ )
+ self.assertEqual(
+ self.datasource.get_vendordata_raw(), DataResponses.FAKE_USER_DATA
+ )
+ self.assertIsNone(self.datasource.availability_zone)
+ self.assertIsNone(self.datasource.region)
+ self.assertEqual(sleep.call_count, 0)
+
+ def test_ssh_keys_empty(self):
+ """
+ get_public_ssh_keys() should return an empty list if no ssh keys are
+ available
+ """
+ self.datasource.metadata["tags"] = []
+ self.datasource.metadata["ssh_public_keys"] = []
+ self.assertEqual(self.datasource.get_public_ssh_keys(), [])
+
+ def test_ssh_keys_only_tags(self):
+ """
+ get_public_ssh_keys() should return list of keys available in tags
+ """
+ self.datasource.metadata["tags"] = [
+ "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD",
+ "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABCCCCC",
+ ]
+ self.datasource.metadata["ssh_public_keys"] = []
+ self.assertEqual(
+ sorted(self.datasource.get_public_ssh_keys()),
+ sorted(
+ [
+ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD",
+ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC",
+ ]
+ ),
+ )
+
+ def test_ssh_keys_only_conf(self):
+ """
+ get_public_ssh_keys() should return list of keys available in
+ ssh_public_keys field
+ """
+ self.datasource.metadata["tags"] = []
+ self.datasource.metadata["ssh_public_keys"] = [
+ {
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA",
+ "fingerprint": "2048 06:ae:... login (RSA)",
+ },
+ {
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC",
+ "fingerprint": "2048 06:ff:... login2 (RSA)",
+ },
+ ]
+ self.assertEqual(
+ sorted(self.datasource.get_public_ssh_keys()),
+ sorted(
+ [
+ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA",
+ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC",
+ ]
+ ),
+ )
+
+ def test_ssh_keys_both(self):
+ """
+ get_public_ssh_keys() should return a merge of keys available
+ in ssh_public_keys and tags
+ """
+ self.datasource.metadata["tags"] = [
+ "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD",
+ ]
+
+ self.datasource.metadata["ssh_public_keys"] = [
+ {
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA",
+ "fingerprint": "2048 06:ae:... login (RSA)",
+ },
+ {
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC",
+ "fingerprint": "2048 06:ff:... login2 (RSA)",
+ },
+ ]
+ self.assertEqual(
+ sorted(self.datasource.get_public_ssh_keys()),
+ sorted(
+ [
+ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC",
+ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD",
+ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA",
+ ]
+ ),
+ )
+
+ @mock.patch("cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4")
+ @mock.patch(
+ "cloudinit.sources.DataSourceScaleway.SourceAddressAdapter",
+ get_source_address_adapter,
+ )
+ @mock.patch("cloudinit.util.get_cmdline")
+ @mock.patch("time.sleep", return_value=None)
+ def test_metadata_404(self, sleep, m_get_cmdline, dhcpv4):
+ """
+ get_data() returns metadata, but neither user data nor vendor data.
+ """
+ m_get_cmdline.return_value = "scaleway"
+
+ # Make user and vendor data APIs return HTTP/404, which means there is
+ # no user / vendor data for the server.
+ httpretty.register_uri(
+ httpretty.GET, self.metadata_url, body=MetadataResponses.get_ok
+ )
+ httpretty.register_uri(
+ httpretty.GET, self.userdata_url, body=DataResponses.empty
+ )
+ httpretty.register_uri(
+ httpretty.GET, self.vendordata_url, body=DataResponses.empty
+ )
+ self.datasource.get_data()
+ self.assertIsNone(self.datasource.get_userdata_raw())
+ self.assertIsNone(self.datasource.get_vendordata_raw())
+ self.assertEqual(sleep.call_count, 0)
+
+ @mock.patch("cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4")
+ @mock.patch(
+ "cloudinit.sources.DataSourceScaleway.SourceAddressAdapter",
+ get_source_address_adapter,
+ )
+ @mock.patch("cloudinit.util.get_cmdline")
+ @mock.patch("time.sleep", return_value=None)
+ def test_metadata_rate_limit(self, sleep, m_get_cmdline, dhcpv4):
+ """
+ get_data() is rate-limited twice by the metadata API while fetching
+ user data.
+ """
+ m_get_cmdline.return_value = "scaleway"
+
+ httpretty.register_uri(
+ httpretty.GET, self.metadata_url, body=MetadataResponses.get_ok
+ )
+ httpretty.register_uri(
+ httpretty.GET, self.vendordata_url, body=DataResponses.empty
+ )
+
+ httpretty.register_uri(
+ httpretty.GET,
+ self.userdata_url,
+ responses=[
+ httpretty.Response(body=DataResponses.rate_limited),
+ httpretty.Response(body=DataResponses.rate_limited),
+ httpretty.Response(body=DataResponses.get_ok),
+ ],
+ )
+ self.datasource.get_data()
+ self.assertEqual(
+ self.datasource.get_userdata_raw(), DataResponses.FAKE_USER_DATA
+ )
+ self.assertEqual(sleep.call_count, 2)
+
+ @mock.patch("cloudinit.sources.DataSourceScaleway.net.find_fallback_nic")
+ @mock.patch("cloudinit.util.get_cmdline")
+ def test_network_config_ok(self, m_get_cmdline, fallback_nic):
+ """
+ network_config will only generate IPv4 config if no ipv6 data is
+ available in the metadata
+ """
+ m_get_cmdline.return_value = "scaleway"
+ fallback_nic.return_value = "ens2"
+ self.datasource.metadata["ipv6"] = None
+
+ netcfg = self.datasource.network_config
+ resp = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "ens2",
+ "subnets": [{"type": "dhcp4"}],
+ }
+ ],
+ }
+ self.assertEqual(netcfg, resp)
+
+ @mock.patch("cloudinit.sources.DataSourceScaleway.net.find_fallback_nic")
+ @mock.patch("cloudinit.util.get_cmdline")
+ def test_network_config_ipv6_ok(self, m_get_cmdline, fallback_nic):
+ """
+ network_config will only generate IPv4/v6 configs if ipv6 data is
+ available in the metadata
+ """
+ m_get_cmdline.return_value = "scaleway"
+ fallback_nic.return_value = "ens2"
+ self.datasource.metadata["ipv6"] = {
+ "address": "2000:abc:4444:9876::42:999",
+ "gateway": "2000:abc:4444:9876::42:000",
+ "netmask": "127",
+ }
+
+ netcfg = self.datasource.network_config
+ resp = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "ens2",
+ "subnets": [
+ {"type": "dhcp4"},
+ {
+ "type": "static",
+ "address": "2000:abc:4444:9876::42:999",
+ "gateway": "2000:abc:4444:9876::42:000",
+ "netmask": "127",
+ },
+ ],
+ }
+ ],
+ }
+ self.assertEqual(netcfg, resp)
+
+ @mock.patch("cloudinit.sources.DataSourceScaleway.net.find_fallback_nic")
+ @mock.patch("cloudinit.util.get_cmdline")
+ def test_network_config_existing(self, m_get_cmdline, fallback_nic):
+ """
+ network_config() should return the same data if a network config
+ already exists
+ """
+ m_get_cmdline.return_value = "scaleway"
+ self.datasource._network_config = "0xdeadbeef"
+
+ netcfg = self.datasource.network_config
+ self.assertEqual(netcfg, "0xdeadbeef")
+
+ @mock.patch("cloudinit.sources.DataSourceScaleway.net.find_fallback_nic")
+ @mock.patch("cloudinit.util.get_cmdline")
+ def test_network_config_unset(self, m_get_cmdline, fallback_nic):
+ """
+ _network_config will be set to sources.UNSET after the first boot.
+ Make sure it behaves correctly.
+ """
+ m_get_cmdline.return_value = "scaleway"
+ fallback_nic.return_value = "ens2"
+ self.datasource.metadata["ipv6"] = None
+ self.datasource._network_config = sources.UNSET
+
+ resp = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "ens2",
+ "subnets": [{"type": "dhcp4"}],
+ }
+ ],
+ }
+
+ netcfg = self.datasource.network_config
+ self.assertEqual(netcfg, resp)
+
+ @mock.patch("cloudinit.sources.DataSourceScaleway.LOG.warning")
+ @mock.patch("cloudinit.sources.DataSourceScaleway.net.find_fallback_nic")
+ @mock.patch("cloudinit.util.get_cmdline")
+ def test_network_config_cached_none(
+ self, m_get_cmdline, fallback_nic, logwarning
+ ):
+ """
+ network_config() should return config data if cached data is None
+ rather than sources.UNSET
+ """
+ m_get_cmdline.return_value = "scaleway"
+ fallback_nic.return_value = "ens2"
+ self.datasource.metadata["ipv6"] = None
+ self.datasource._network_config = None
+
+ resp = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "ens2",
+ "subnets": [{"type": "dhcp4"}],
+ }
+ ],
+ }
+
+ netcfg = self.datasource.network_config
+ self.assertEqual(netcfg, resp)
+ logwarning.assert_called_with(
+ "Found None as cached _network_config. Resetting to %s",
+ sources.UNSET,
+ )
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/sources/test_smartos.py
index 5847a384..55239c4e 100644
--- a/tests/unittests/test_datasource/test_smartos.py
+++ b/tests/unittests/sources/test_smartos.py
@@ -5,14 +5,13 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-'''This is a testcase for the SmartOS datasource.
+"""This is a testcase for the SmartOS datasource.
It replicates a serial console and acts like the SmartOS console does in
order to validate return responses.
-'''
+"""
-from binascii import crc32
import json
import multiprocessing
import os
@@ -22,32 +21,40 @@ import signal
import stat
import unittest
import uuid
+from binascii import crc32
+from cloudinit import helpers as c_helpers
from cloudinit import serial
+from cloudinit.event import EventScope, EventType
from cloudinit.sources import DataSourceSmartOS
+from cloudinit.sources.DataSourceSmartOS import SERIAL_DEVICE, SMARTOS_ENV_KVM
from cloudinit.sources.DataSourceSmartOS import (
convert_smartos_network_data as convert_net,
- SMARTOS_ENV_KVM, SERIAL_DEVICE, get_smartos_environ,
- identify_file)
-from cloudinit.event import EventType
-
-from cloudinit import helpers as c_helpers
-from cloudinit.util import (b64e, write_file)
-from cloudinit.subp import (subp, ProcessExecutionError, which)
-
-from cloudinit.tests.helpers import (
- CiTestCase, mock, FilesystemMockingTestCase, skipIf)
-
+)
+from cloudinit.sources.DataSourceSmartOS import (
+ get_smartos_environ,
+ identify_file,
+)
+from cloudinit.subp import ProcessExecutionError, subp, which
+from cloudinit.util import b64e, write_file
+from tests.unittests.helpers import (
+ CiTestCase,
+ FilesystemMockingTestCase,
+ mock,
+ skipIf,
+)
try:
import serial as _pyserial
+
assert _pyserial # avoid pyflakes error F401: import unused
HAS_PYSERIAL = True
except ImportError:
HAS_PYSERIAL = False
-DSMOS = 'cloudinit.sources.DataSourceSmartOS'
-SDC_NICS = json.loads("""
+DSMOS = "cloudinit.sources.DataSourceSmartOS"
+SDC_NICS = json.loads(
+ """
[
{
"nic_tag": "external",
@@ -87,10 +94,12 @@ SDC_NICS = json.loads("""
]
}
]
-""")
+"""
+)
-SDC_NICS_ALT = json.loads("""
+SDC_NICS_ALT = json.loads(
+ """
[
{
"interface": "net0",
@@ -126,9 +135,11 @@ SDC_NICS_ALT = json.loads("""
"mtu": 1500
}
]
-""")
+"""
+)
-SDC_NICS_DHCP = json.loads("""
+SDC_NICS_DHCP = json.loads(
+ """
[
{
"interface": "net0",
@@ -164,9 +175,11 @@ SDC_NICS_DHCP = json.loads("""
"mtu": 1500
}
]
-""")
+"""
+)
-SDC_NICS_MIP = json.loads("""
+SDC_NICS_MIP = json.loads(
+ """
[
{
"interface": "net0",
@@ -204,9 +217,11 @@ SDC_NICS_MIP = json.loads("""
"mtu": 1500
}
]
-""")
+"""
+)
-SDC_NICS_MIP_IPV6 = json.loads("""
+SDC_NICS_MIP_IPV6 = json.loads(
+ """
[
{
"interface": "net0",
@@ -243,9 +258,11 @@ SDC_NICS_MIP_IPV6 = json.loads("""
"mtu": 1500
}
]
-""")
+"""
+)
-SDC_NICS_IPV4_IPV6 = json.loads("""
+SDC_NICS_IPV4_IPV6 = json.loads(
+ """
[
{
"interface": "net0",
@@ -277,9 +294,11 @@ SDC_NICS_IPV4_IPV6 = json.loads("""
"mtu": 1500
}
]
-""")
+"""
+)
-SDC_NICS_SINGLE_GATEWAY = json.loads("""
+SDC_NICS_SINGLE_GATEWAY = json.loads(
+ """
[
{
"interface":"net0",
@@ -309,32 +328,33 @@ SDC_NICS_SINGLE_GATEWAY = json.loads("""
"mtu":1500
}
]
-""")
+"""
+)
MOCK_RETURNS = {
- 'hostname': 'test-host',
- 'root_authorized_keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname',
- 'disable_iptables_flag': None,
- 'enable_motd_sys_info': None,
- 'test-var1': 'some data',
- 'cloud-init:user-data': '\n'.join(['#!/bin/sh', '/bin/true', '']),
- 'sdc:datacenter_name': 'somewhere2',
- 'sdc:operator-script': '\n'.join(['bin/true', '']),
- 'sdc:uuid': str(uuid.uuid4()),
- 'sdc:vendor-data': '\n'.join(['VENDOR_DATA', '']),
- 'user-data': '\n'.join(['something', '']),
- 'user-script': '\n'.join(['/bin/true', '']),
- 'sdc:nics': json.dumps(SDC_NICS),
+ "hostname": "test-host",
+ "root_authorized_keys": "ssh-rsa AAAAB3Nz...aC1yc2E= keyname",
+ "disable_iptables_flag": None,
+ "enable_motd_sys_info": None,
+ "test-var1": "some data",
+ "cloud-init:user-data": "\n".join(["#!/bin/sh", "/bin/true", ""]),
+ "sdc:datacenter_name": "somewhere2",
+ "sdc:operator-script": "\n".join(["bin/true", ""]),
+ "sdc:uuid": str(uuid.uuid4()),
+ "sdc:vendor-data": "\n".join(["VENDOR_DATA", ""]),
+ "user-data": "\n".join(["something", ""]),
+ "user-script": "\n".join(["/bin/true", ""]),
+ "sdc:nics": json.dumps(SDC_NICS),
}
-DMI_DATA_RETURN = 'smartdc'
+DMI_DATA_RETURN = "smartdc"
# Useful for calculating the length of a frame body. A SUCCESS body will be
# followed by more characters or be one character less if SUCCESS with no
# payload. See Section 4.3 of https://eng.joyent.com/mdata/protocol.html.
-SUCCESS_LEN = len('0123abcd SUCCESS ')
-NOTFOUND_LEN = len('0123abcd NOTFOUND')
+SUCCESS_LEN = len("0123abcd SUCCESS ")
+NOTFOUND_LEN = len("0123abcd NOTFOUND")
class PsuedoJoyentClient(object):
@@ -364,11 +384,11 @@ class PsuedoJoyentClient(object):
return True
def open_transport(self):
- assert(not self._is_open)
+ assert not self._is_open
self._is_open = True
def close_transport(self):
- assert(self._is_open)
+ assert self._is_open
self._is_open = False
@@ -381,21 +401,35 @@ class TestSmartOSDataSource(FilesystemMockingTestCase):
self.add_patch(DSMOS + ".get_smartos_environ", "get_smartos_environ")
self.add_patch(DSMOS + ".jmc_client_factory", "jmc_cfact")
- self.legacy_user_d = self.tmp_path('legacy_user_tmp')
+ self.legacy_user_d = self.tmp_path("legacy_user_tmp")
os.mkdir(self.legacy_user_d)
- self.add_patch(DSMOS + ".LEGACY_USER_D", "m_legacy_user_d",
- autospec=False, new=self.legacy_user_d)
- self.add_patch(DSMOS + ".identify_file", "m_identify_file",
- return_value="text/plain")
-
- def _get_ds(self, mockdata=None, mode=DataSourceSmartOS.SMARTOS_ENV_KVM,
- sys_cfg=None, ds_cfg=None):
+ self.add_patch(
+ DSMOS + ".LEGACY_USER_D",
+ "m_legacy_user_d",
+ autospec=False,
+ new=self.legacy_user_d,
+ )
+ self.add_patch(
+ DSMOS + ".identify_file",
+ "m_identify_file",
+ return_value="text/plain",
+ )
+
+ def _get_ds(
+ self,
+ mockdata=None,
+ mode=DataSourceSmartOS.SMARTOS_ENV_KVM,
+ sys_cfg=None,
+ ds_cfg=None,
+ ):
self.jmc_cfact.return_value = PsuedoJoyentClient(mockdata)
self.get_smartos_environ.return_value = mode
tmpd = self.tmp_dir()
- dirs = {'cloud_dir': self.tmp_path('cloud_dir', tmpd),
- 'run_dir': self.tmp_path('run_dir')}
+ dirs = {
+ "cloud_dir": self.tmp_path("cloud_dir", tmpd),
+ "run_dir": self.tmp_path("run_dir"),
+ }
for d in dirs.values():
os.mkdir(d)
paths = c_helpers.Paths(dirs)
@@ -404,14 +438,15 @@ class TestSmartOSDataSource(FilesystemMockingTestCase):
sys_cfg = {}
if ds_cfg is not None:
- sys_cfg['datasource'] = sys_cfg.get('datasource', {})
- sys_cfg['datasource']['SmartOS'] = ds_cfg
+ sys_cfg["datasource"] = sys_cfg.get("datasource", {})
+ sys_cfg["datasource"]["SmartOS"] = ds_cfg
return DataSourceSmartOS.DataSourceSmartOS(
- sys_cfg, distro=None, paths=paths)
+ sys_cfg, distro=None, paths=paths
+ )
def test_no_base64(self):
- ds_cfg = {'no_base64_decode': ['test_var1'], 'all_base': True}
+ ds_cfg = {"no_base64_decode": ["test_var1"], "all_base": True}
dsrc = self._get_ds(ds_cfg=ds_cfg)
ret = dsrc.get_data()
self.assertTrue(ret)
@@ -420,166 +455,180 @@ class TestSmartOSDataSource(FilesystemMockingTestCase):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['sdc:uuid'],
- dsrc.metadata['instance-id'])
+ self.assertEqual(
+ MOCK_RETURNS["sdc:uuid"], dsrc.metadata["instance-id"]
+ )
def test_platform_info(self):
"""All platform-related attributes are properly set."""
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
- self.assertEqual('joyent', dsrc.cloud_name)
- self.assertEqual('joyent', dsrc.platform_type)
- self.assertEqual('serial (/dev/ttyS1)', dsrc.subplatform)
+ self.assertEqual("joyent", dsrc.cloud_name)
+ self.assertEqual("joyent", dsrc.platform_type)
+ self.assertEqual("serial (/dev/ttyS1)", dsrc.subplatform)
def test_root_keys(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['root_authorized_keys'],
- dsrc.metadata['public-keys'])
+ self.assertEqual(
+ MOCK_RETURNS["root_authorized_keys"], dsrc.metadata["public-keys"]
+ )
def test_hostname_b64(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['hostname'],
- dsrc.metadata['local-hostname'])
+ self.assertEqual(
+ MOCK_RETURNS["hostname"], dsrc.metadata["local-hostname"]
+ )
def test_hostname(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['hostname'],
- dsrc.metadata['local-hostname'])
+ self.assertEqual(
+ MOCK_RETURNS["hostname"], dsrc.metadata["local-hostname"]
+ )
def test_hostname_if_no_sdc_hostname(self):
my_returns = MOCK_RETURNS.copy()
- my_returns['sdc:hostname'] = 'sdc-' + my_returns['hostname']
+ my_returns["sdc:hostname"] = "sdc-" + my_returns["hostname"]
dsrc = self._get_ds(mockdata=my_returns)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(my_returns['hostname'],
- dsrc.metadata['local-hostname'])
+ self.assertEqual(
+ my_returns["hostname"], dsrc.metadata["local-hostname"]
+ )
def test_sdc_hostname_if_no_hostname(self):
my_returns = MOCK_RETURNS.copy()
- my_returns['sdc:hostname'] = 'sdc-' + my_returns['hostname']
- del my_returns['hostname']
+ my_returns["sdc:hostname"] = "sdc-" + my_returns["hostname"]
+ del my_returns["hostname"]
dsrc = self._get_ds(mockdata=my_returns)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(my_returns['sdc:hostname'],
- dsrc.metadata['local-hostname'])
+ self.assertEqual(
+ my_returns["sdc:hostname"], dsrc.metadata["local-hostname"]
+ )
def test_sdc_uuid_if_no_hostname_or_sdc_hostname(self):
my_returns = MOCK_RETURNS.copy()
- del my_returns['hostname']
+ del my_returns["hostname"]
dsrc = self._get_ds(mockdata=my_returns)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(my_returns['sdc:uuid'],
- dsrc.metadata['local-hostname'])
+ self.assertEqual(
+ my_returns["sdc:uuid"], dsrc.metadata["local-hostname"]
+ )
def test_userdata(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['user-data'],
- dsrc.metadata['legacy-user-data'])
- self.assertEqual(MOCK_RETURNS['cloud-init:user-data'],
- dsrc.userdata_raw)
+ self.assertEqual(
+ MOCK_RETURNS["user-data"], dsrc.metadata["legacy-user-data"]
+ )
+ self.assertEqual(
+ MOCK_RETURNS["cloud-init:user-data"], dsrc.userdata_raw
+ )
def test_sdc_nics(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(json.loads(MOCK_RETURNS['sdc:nics']),
- dsrc.metadata['network-data'])
+ self.assertEqual(
+ json.loads(MOCK_RETURNS["sdc:nics"]), dsrc.metadata["network-data"]
+ )
def test_sdc_scripts(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['user-script'],
- dsrc.metadata['user-script'])
+ self.assertEqual(
+ MOCK_RETURNS["user-script"], dsrc.metadata["user-script"]
+ )
legacy_script_f = "%s/user-script" % self.legacy_user_d
print("legacy_script_f=%s" % legacy_script_f)
self.assertTrue(os.path.exists(legacy_script_f))
self.assertTrue(os.path.islink(legacy_script_f))
user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:]
- self.assertEqual(user_script_perm, '700')
+ self.assertEqual(user_script_perm, "700")
def test_scripts_shebanged(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['user-script'],
- dsrc.metadata['user-script'])
+ self.assertEqual(
+ MOCK_RETURNS["user-script"], dsrc.metadata["user-script"]
+ )
legacy_script_f = "%s/user-script" % self.legacy_user_d
self.assertTrue(os.path.exists(legacy_script_f))
self.assertTrue(os.path.islink(legacy_script_f))
shebang = None
- with open(legacy_script_f, 'r') as f:
+ with open(legacy_script_f, "r") as f:
shebang = f.readlines()[0].strip()
self.assertEqual(shebang, "#!/bin/bash")
user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:]
- self.assertEqual(user_script_perm, '700')
+ self.assertEqual(user_script_perm, "700")
def test_scripts_shebang_not_added(self):
"""
- Test that the SmartOS requirement that plain text scripts
- are executable. This test makes sure that plain texts scripts
- with out file magic have it added appropriately by cloud-init.
+ Test the SmartOS requirement that plain text scripts be
+ executable. This test makes sure that plain text scripts
+ without file magic have it added appropriately by cloud-init.
"""
my_returns = MOCK_RETURNS.copy()
- my_returns['user-script'] = '\n'.join(['#!/usr/bin/perl',
- 'print("hi")', ''])
+ my_returns["user-script"] = "\n".join(
+ ["#!/usr/bin/perl", 'print("hi")', ""]
+ )
dsrc = self._get_ds(mockdata=my_returns)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(my_returns['user-script'],
- dsrc.metadata['user-script'])
+ self.assertEqual(
+ my_returns["user-script"], dsrc.metadata["user-script"]
+ )
legacy_script_f = "%s/user-script" % self.legacy_user_d
self.assertTrue(os.path.exists(legacy_script_f))
self.assertTrue(os.path.islink(legacy_script_f))
shebang = None
- with open(legacy_script_f, 'r') as f:
+ with open(legacy_script_f, "r") as f:
shebang = f.readlines()[0].strip()
self.assertEqual(shebang, "#!/usr/bin/perl")
def test_userdata_removed(self):
"""
- User-data in the SmartOS world is supposed to be written to a file
- each and every boot. This tests to make sure that in the event the
- legacy user-data is removed, the existing user-data is backed-up
- and there is no /var/db/user-data left.
+ User-data in the SmartOS world is supposed to be written to a file
+ on each and every boot. This test makes sure that in the event the
+ legacy user-data is removed, the existing user-data is backed up
+ and there is no /var/db/user-data left.
"""
user_data_f = "%s/mdata-user-data" % self.legacy_user_d
- with open(user_data_f, 'w') as f:
+ with open(user_data_f, "w") as f:
f.write("PREVIOUS")
my_returns = MOCK_RETURNS.copy()
- del my_returns['user-data']
+ del my_returns["user-data"]
dsrc = self._get_ds(mockdata=my_returns)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertFalse(dsrc.metadata.get('legacy-user-data'))
+ self.assertFalse(dsrc.metadata.get("legacy-user-data"))
found_new = False
for root, _dirs, files in os.walk(self.legacy_user_d):
for name in files:
name_f = os.path.join(root, name)
permissions = oct(os.stat(name_f)[stat.ST_MODE])[-3:]
- if re.match(r'.*\/mdata-user-data$', name_f):
+ if re.match(r".*\/mdata-user-data$", name_f):
found_new = True
print(name_f)
- self.assertEqual(permissions, '400')
+ self.assertEqual(permissions, "400")
self.assertFalse(found_new)
@@ -587,17 +636,18 @@ class TestSmartOSDataSource(FilesystemMockingTestCase):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['sdc:vendor-data'],
- dsrc.metadata['vendor-data'])
+ self.assertEqual(
+ MOCK_RETURNS["sdc:vendor-data"], dsrc.metadata["vendor-data"]
+ )
def test_default_vendor_data(self):
my_returns = MOCK_RETURNS.copy()
- def_op_script = my_returns['sdc:vendor-data']
- del my_returns['sdc:vendor-data']
+ def_op_script = my_returns["sdc:vendor-data"]
+ del my_returns["sdc:vendor-data"]
dsrc = self._get_ds(mockdata=my_returns)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertNotEqual(def_op_script, dsrc.metadata['vendor-data'])
+ self.assertNotEqual(def_op_script, dsrc.metadata["vendor-data"])
# we expect default vendor-data is a boothook
self.assertTrue(dsrc.vendordata_raw.startswith("#cloud-boothook"))
@@ -606,15 +656,19 @@ class TestSmartOSDataSource(FilesystemMockingTestCase):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['disable_iptables_flag'],
- dsrc.metadata['iptables_disable'])
+ self.assertEqual(
+ MOCK_RETURNS["disable_iptables_flag"],
+ dsrc.metadata["iptables_disable"],
+ )
def test_motd_sys_info(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(MOCK_RETURNS['enable_motd_sys_info'],
- dsrc.metadata['motd_sys_info'])
+ self.assertEqual(
+ MOCK_RETURNS["enable_motd_sys_info"],
+ dsrc.metadata["motd_sys_info"],
+ )
def test_default_ephemeral(self):
# Test to make sure that the builtin config has the ephemeral
@@ -625,16 +679,16 @@ class TestSmartOSDataSource(FilesystemMockingTestCase):
ret = dsrc.get_data()
self.assertTrue(ret)
- assert 'disk_setup' in cfg
- assert 'fs_setup' in cfg
- self.assertIsInstance(cfg['disk_setup'], dict)
- self.assertIsInstance(cfg['fs_setup'], list)
+ assert "disk_setup" in cfg
+ assert "fs_setup" in cfg
+ self.assertIsInstance(cfg["disk_setup"], dict)
+ self.assertIsInstance(cfg["fs_setup"], list)
def test_override_disk_aliases(self):
# Test to make sure that the built-in DS is overriden
builtin = DataSourceSmartOS.BUILTIN_DS_CONFIG
- mydscfg = {'disk_aliases': {'FOO': '/dev/bar'}}
+ mydscfg = {"disk_aliases": {"FOO": "/dev/bar"}}
# expect that these values are in builtin, or this is pointless
for k in mydscfg:
@@ -644,21 +698,30 @@ class TestSmartOSDataSource(FilesystemMockingTestCase):
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEqual(mydscfg['disk_aliases']['FOO'],
- dsrc.ds_cfg['disk_aliases']['FOO'])
+ self.assertEqual(
+ mydscfg["disk_aliases"]["FOO"], dsrc.ds_cfg["disk_aliases"]["FOO"]
+ )
- self.assertEqual(dsrc.device_name_to_device('FOO'),
- mydscfg['disk_aliases']['FOO'])
+ self.assertEqual(
+ dsrc.device_name_to_device("FOO"), mydscfg["disk_aliases"]["FOO"]
+ )
def test_reconfig_network_on_boot(self):
# Test to ensure that network is configured from metadata on each boot
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
- self.assertSetEqual(set([EventType.BOOT_NEW_INSTANCE, EventType.BOOT]),
- dsrc.update_events['network'])
+ self.assertSetEqual(
+ {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ },
+ dsrc.default_update_events[EventScope.NETWORK],
+ )
class TestIdentifyFile(CiTestCase):
"""Test the 'identify_file' utility."""
+
@skipIf(not which("file"), "command 'file' not available.")
def test_file_happy_path(self):
"""Test file is available and functional on plain text."""
@@ -676,14 +739,16 @@ class TestIdentifyFile(CiTestCase):
self.assertEqual(None, identify_file(fname))
self.assertEqual(
[mock.call(["file", "--brief", "--mime-type", fname])],
- m_subp.call_args_list)
+ m_subp.call_args_list,
+ )
class ShortReader(object):
"""Implements a 'read' interface for bytes provided.
much like io.BytesIO but the 'endbyte' acts as if EOF.
When it is reached a short will be returned."""
- def __init__(self, initial_bytes, endbyte=b'\0'):
+
+ def __init__(self, initial_bytes, endbyte=b"\0"):
self.data = initial_bytes
self.index = 0
self.len = len(self.data)
@@ -696,7 +761,7 @@ class ShortReader(object):
def read(self, size=-1):
"""Read size bytes but not past a null."""
if size == 0 or self.index >= self.len:
- return b''
+ return b""
rsize = size
if size < 0 or size + self.index > self.len:
@@ -707,7 +772,7 @@ class ShortReader(object):
rsize = next_null - self.index + 1
i = self.index
self.index += rsize
- ret = self.data[i:i + rsize]
+ ret = self.data[i : i + rsize]
if len(ret) and ret[-1:] == self.endbyte:
ret = ret[:-1]
return ret
@@ -715,32 +780,34 @@ class ShortReader(object):
class TestJoyentMetadataClient(FilesystemMockingTestCase):
- invalid = b'invalid command\n'
- failure = b'FAILURE\n'
- v2_ok = b'V2_OK\n'
+ invalid = b"invalid command\n"
+ failure = b"FAILURE\n"
+ v2_ok = b"V2_OK\n"
def setUp(self):
super(TestJoyentMetadataClient, self).setUp()
self.serial = mock.MagicMock(spec=serial.Serial)
- self.request_id = 0xabcdef12
- self.metadata_value = 'value'
+ self.request_id = 0xABCDEF12
+ self.metadata_value = "value"
self.response_parts = {
- 'command': 'SUCCESS',
- 'crc': 'b5a9ff00',
- 'length': SUCCESS_LEN + len(b64e(self.metadata_value)),
- 'payload': b64e(self.metadata_value),
- 'request_id': '{0:08x}'.format(self.request_id),
+ "command": "SUCCESS",
+ "crc": "b5a9ff00",
+ "length": SUCCESS_LEN + len(b64e(self.metadata_value)),
+ "payload": b64e(self.metadata_value),
+ "request_id": "{0:08x}".format(self.request_id),
}
def make_response():
- payloadstr = ''
- if 'payload' in self.response_parts:
- payloadstr = ' {0}'.format(self.response_parts['payload'])
- return ('V2 {length} {crc} {request_id} '
- '{command}{payloadstr}\n'.format(
- payloadstr=payloadstr,
- **self.response_parts).encode('ascii'))
+ payloadstr = ""
+ if "payload" in self.response_parts:
+ payloadstr = " {0}".format(self.response_parts["payload"])
+ return (
+ "V2 {length} {crc} {request_id} "
+ "{command}{payloadstr}\n".format(
+ payloadstr=payloadstr, **self.response_parts
+ ).encode("ascii")
+ )
self.metasource_data = None
@@ -754,41 +821,49 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase):
self.serial.read.side_effect = read_response
self.patched_funcs.enter_context(
- mock.patch('cloudinit.sources.DataSourceSmartOS.random.randint',
- mock.Mock(return_value=self.request_id)))
+ mock.patch(
+ "cloudinit.sources.DataSourceSmartOS.random.randint",
+ mock.Mock(return_value=self.request_id),
+ )
+ )
def _get_client(self):
return DataSourceSmartOS.JoyentMetadataClient(
- fp=self.serial, smartos_type=DataSourceSmartOS.SMARTOS_ENV_KVM)
+ fp=self.serial, smartos_type=DataSourceSmartOS.SMARTOS_ENV_KVM
+ )
def _get_serial_client(self):
self.serial.timeout = 1
- return DataSourceSmartOS.JoyentMetadataSerialClient(None,
- fp=self.serial)
+ return DataSourceSmartOS.JoyentMetadataSerialClient(
+ None, fp=self.serial
+ )
def assertEndsWith(self, haystack, prefix):
- self.assertTrue(haystack.endswith(prefix),
- "{0} does not end with '{1}'".format(
- repr(haystack), prefix))
+ self.assertTrue(
+ haystack.endswith(prefix),
+ "{0} does not end with '{1}'".format(repr(haystack), prefix),
+ )
def assertStartsWith(self, haystack, prefix):
- self.assertTrue(haystack.startswith(prefix),
- "{0} does not start with '{1}'".format(
- repr(haystack), prefix))
+ self.assertTrue(
+ haystack.startswith(prefix),
+ "{0} does not start with '{1}'".format(repr(haystack), prefix),
+ )
def assertNoMoreSideEffects(self, obj):
self.assertRaises(StopIteration, obj)
def test_get_metadata_writes_a_single_line(self):
client = self._get_client()
- client.get('some_key')
+ client.get("some_key")
self.assertEqual(1, self.serial.write.call_count)
written_line = self.serial.write.call_args[0][0]
- self.assertEndsWith(written_line.decode('ascii'),
- b'\n'.decode('ascii'))
- self.assertEqual(1, written_line.count(b'\n'))
+ self.assertEndsWith(
+ written_line.decode("ascii"), b"\n".decode("ascii")
+ )
+ self.assertEqual(1, written_line.count(b"\n"))
- def _get_written_line(self, key='some_key'):
+ def _get_written_line(self, key="some_key"):
client = self._get_client()
client.get(key)
return self.serial.write.call_args[0][0]
@@ -798,76 +873,86 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase):
def test_get_metadata_line_starts_with_v2(self):
foo = self._get_written_line()
- self.assertStartsWith(foo.decode('ascii'), b'V2'.decode('ascii'))
+ self.assertStartsWith(foo.decode("ascii"), b"V2".decode("ascii"))
def test_get_metadata_uses_get_command(self):
- parts = self._get_written_line().decode('ascii').strip().split(' ')
- self.assertEqual('GET', parts[4])
+ parts = self._get_written_line().decode("ascii").strip().split(" ")
+ self.assertEqual("GET", parts[4])
def test_get_metadata_base64_encodes_argument(self):
- key = 'my_key'
- parts = self._get_written_line(key).decode('ascii').strip().split(' ')
+ key = "my_key"
+ parts = self._get_written_line(key).decode("ascii").strip().split(" ")
self.assertEqual(b64e(key), parts[5])
def test_get_metadata_calculates_length_correctly(self):
- parts = self._get_written_line().decode('ascii').strip().split(' ')
- expected_length = len(' '.join(parts[3:]))
+ parts = self._get_written_line().decode("ascii").strip().split(" ")
+ expected_length = len(" ".join(parts[3:]))
self.assertEqual(expected_length, int(parts[1]))
def test_get_metadata_uses_appropriate_request_id(self):
- parts = self._get_written_line().decode('ascii').strip().split(' ')
+ parts = self._get_written_line().decode("ascii").strip().split(" ")
request_id = parts[3]
self.assertEqual(8, len(request_id))
self.assertEqual(request_id, request_id.lower())
def test_get_metadata_uses_random_number_for_request_id(self):
line = self._get_written_line()
- request_id = line.decode('ascii').strip().split(' ')[3]
- self.assertEqual('{0:08x}'.format(self.request_id), request_id)
+ request_id = line.decode("ascii").strip().split(" ")[3]
+ self.assertEqual("{0:08x}".format(self.request_id), request_id)
def test_get_metadata_checksums_correctly(self):
- parts = self._get_written_line().decode('ascii').strip().split(' ')
- expected_checksum = '{0:08x}'.format(
- crc32(' '.join(parts[3:]).encode('utf-8')) & 0xffffffff)
+ parts = self._get_written_line().decode("ascii").strip().split(" ")
+ expected_checksum = "{0:08x}".format(
+ crc32(" ".join(parts[3:]).encode("utf-8")) & 0xFFFFFFFF
+ )
checksum = parts[2]
self.assertEqual(expected_checksum, checksum)
def test_get_metadata_reads_a_line(self):
client = self._get_client()
- client.get('some_key')
+ client.get("some_key")
self.assertEqual(self.metasource_data_len, self.serial.read.call_count)
def test_get_metadata_returns_valid_value(self):
client = self._get_client()
- value = client.get('some_key')
+ value = client.get("some_key")
self.assertEqual(self.metadata_value, value)
def test_get_metadata_throws_exception_for_incorrect_length(self):
- self.response_parts['length'] = 0
+ self.response_parts["length"] = 0
client = self._get_client()
- self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException,
- client.get, 'some_key')
+ self.assertRaises(
+ DataSourceSmartOS.JoyentMetadataFetchException,
+ client.get,
+ "some_key",
+ )
def test_get_metadata_throws_exception_for_incorrect_crc(self):
- self.response_parts['crc'] = 'deadbeef'
+ self.response_parts["crc"] = "deadbeef"
client = self._get_client()
- self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException,
- client.get, 'some_key')
+ self.assertRaises(
+ DataSourceSmartOS.JoyentMetadataFetchException,
+ client.get,
+ "some_key",
+ )
def test_get_metadata_throws_exception_for_request_id_mismatch(self):
- self.response_parts['request_id'] = 'deadbeef'
+ self.response_parts["request_id"] = "deadbeef"
client = self._get_client()
- client._checksum = lambda _: self.response_parts['crc']
- self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException,
- client.get, 'some_key')
+ client._checksum = lambda _: self.response_parts["crc"]
+ self.assertRaises(
+ DataSourceSmartOS.JoyentMetadataFetchException,
+ client.get,
+ "some_key",
+ )
def test_get_metadata_returns_None_if_value_not_found(self):
- self.response_parts['payload'] = ''
- self.response_parts['command'] = 'NOTFOUND'
- self.response_parts['length'] = NOTFOUND_LEN
+ self.response_parts["payload"] = ""
+ self.response_parts["command"] = "NOTFOUND"
+ self.response_parts["length"] = NOTFOUND_LEN
client = self._get_client()
- client._checksum = lambda _: self.response_parts['crc']
- self.assertIsNone(client.get('some_key'))
+ client._checksum = lambda _: self.response_parts["crc"]
+ self.assertIsNone(client.get("some_key"))
def test_negotiate(self):
client = self._get_client()
@@ -879,55 +964,58 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase):
def test_negotiate_short_response(self):
client = self._get_client()
# chopped '\n' from v2_ok.
- reader = ShortReader(self.v2_ok[:-1] + b'\0')
+ reader = ShortReader(self.v2_ok[:-1] + b"\0")
client.fp.read.side_effect = reader.read
- self.assertRaises(DataSourceSmartOS.JoyentMetadataTimeoutException,
- client._negotiate)
+ self.assertRaises(
+ DataSourceSmartOS.JoyentMetadataTimeoutException, client._negotiate
+ )
self.assertTrue(reader.emptied)
def test_negotiate_bad_response(self):
client = self._get_client()
- reader = ShortReader(b'garbage\n' + self.v2_ok)
+ reader = ShortReader(b"garbage\n" + self.v2_ok)
client.fp.read.side_effect = reader.read
- self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException,
- client._negotiate)
+ self.assertRaises(
+ DataSourceSmartOS.JoyentMetadataFetchException, client._negotiate
+ )
self.assertEqual(self.v2_ok, client.fp.read())
def test_serial_open_transport(self):
client = self._get_serial_client()
- reader = ShortReader(b'garbage\0' + self.invalid + self.v2_ok)
+ reader = ShortReader(b"garbage\0" + self.invalid + self.v2_ok)
client.fp.read.side_effect = reader.read
client.open_transport()
self.assertTrue(reader.emptied)
def test_flush_failure(self):
client = self._get_serial_client()
- reader = ShortReader(b'garbage' + b'\0' + self.failure +
- self.invalid + self.v2_ok)
+ reader = ShortReader(
+ b"garbage" + b"\0" + self.failure + self.invalid + self.v2_ok
+ )
client.fp.read.side_effect = reader.read
client.open_transport()
self.assertTrue(reader.emptied)
def test_flush_many_timeouts(self):
client = self._get_serial_client()
- reader = ShortReader(b'\0' * 100 + self.invalid + self.v2_ok)
+ reader = ShortReader(b"\0" * 100 + self.invalid + self.v2_ok)
client.fp.read.side_effect = reader.read
client.open_transport()
self.assertTrue(reader.emptied)
def test_list_metadata_returns_list(self):
- parts = ['foo', 'bar']
- value = b64e('\n'.join(parts))
- self.response_parts['payload'] = value
- self.response_parts['crc'] = '40873553'
- self.response_parts['length'] = SUCCESS_LEN + len(value)
+ parts = ["foo", "bar"]
+ value = b64e("\n".join(parts))
+ self.response_parts["payload"] = value
+ self.response_parts["crc"] = "40873553"
+ self.response_parts["length"] = SUCCESS_LEN + len(value)
client = self._get_client()
self.assertEqual(client.list(), parts)
def test_list_metadata_returns_empty_list_if_no_customer_metadata(self):
- del self.response_parts['payload']
- self.response_parts['length'] = SUCCESS_LEN - 1
- self.response_parts['crc'] = '14e563ba'
+ del self.response_parts["payload"]
+ self.response_parts["length"] = SUCCESS_LEN - 1
+ self.response_parts["crc"] = "14e563ba"
client = self._get_client()
self.assertEqual(client.list(), [])
@@ -935,181 +1023,354 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase):
class TestNetworkConversion(CiTestCase):
def test_convert_simple(self):
expected = {
- 'version': 1,
- 'config': [
- {'name': 'net0', 'type': 'physical',
- 'subnets': [{'type': 'static', 'gateway': '8.12.42.1',
- 'address': '8.12.42.102/24'}],
- 'mtu': 1500, 'mac_address': '90:b8:d0:f5:e4:f5'},
- {'name': 'net1', 'type': 'physical',
- 'subnets': [{'type': 'static',
- 'address': '192.168.128.93/22'}],
- 'mtu': 8500, 'mac_address': '90:b8:d0:a5:ff:cd'}]}
+ "version": 1,
+ "config": [
+ {
+ "name": "net0",
+ "type": "physical",
+ "subnets": [
+ {
+ "type": "static",
+ "gateway": "8.12.42.1",
+ "address": "8.12.42.102/24",
+ }
+ ],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:f5:e4:f5",
+ },
+ {
+ "name": "net1",
+ "type": "physical",
+ "subnets": [
+ {"type": "static", "address": "192.168.128.93/22"}
+ ],
+ "mtu": 8500,
+ "mac_address": "90:b8:d0:a5:ff:cd",
+ },
+ ],
+ }
found = convert_net(SDC_NICS)
self.assertEqual(expected, found)
def test_convert_simple_alt(self):
expected = {
- 'version': 1,
- 'config': [
- {'name': 'net0', 'type': 'physical',
- 'subnets': [{'type': 'static', 'gateway': '8.12.42.1',
- 'address': '8.12.42.51/24'}],
- 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'},
- {'name': 'net1', 'type': 'physical',
- 'subnets': [{'type': 'static',
- 'address': '10.210.1.217/24'}],
- 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]}
+ "version": 1,
+ "config": [
+ {
+ "name": "net0",
+ "type": "physical",
+ "subnets": [
+ {
+ "type": "static",
+ "gateway": "8.12.42.1",
+ "address": "8.12.42.51/24",
+ }
+ ],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:ae:64:51",
+ },
+ {
+ "name": "net1",
+ "type": "physical",
+ "subnets": [
+ {"type": "static", "address": "10.210.1.217/24"}
+ ],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:bd:4f:9c",
+ },
+ ],
+ }
found = convert_net(SDC_NICS_ALT)
self.assertEqual(expected, found)
def test_convert_simple_dhcp(self):
expected = {
- 'version': 1,
- 'config': [
- {'name': 'net0', 'type': 'physical',
- 'subnets': [{'type': 'static', 'gateway': '8.12.42.1',
- 'address': '8.12.42.51/24'}],
- 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'},
- {'name': 'net1', 'type': 'physical',
- 'subnets': [{'type': 'dhcp4'}],
- 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]}
+ "version": 1,
+ "config": [
+ {
+ "name": "net0",
+ "type": "physical",
+ "subnets": [
+ {
+ "type": "static",
+ "gateway": "8.12.42.1",
+ "address": "8.12.42.51/24",
+ }
+ ],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:ae:64:51",
+ },
+ {
+ "name": "net1",
+ "type": "physical",
+ "subnets": [{"type": "dhcp4"}],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:bd:4f:9c",
+ },
+ ],
+ }
found = convert_net(SDC_NICS_DHCP)
self.assertEqual(expected, found)
def test_convert_simple_multi_ip(self):
expected = {
- 'version': 1,
- 'config': [
- {'name': 'net0', 'type': 'physical',
- 'subnets': [{'type': 'static', 'gateway': '8.12.42.1',
- 'address': '8.12.42.51/24'},
- {'type': 'static',
- 'address': '8.12.42.52/24'}],
- 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'},
- {'name': 'net1', 'type': 'physical',
- 'subnets': [{'type': 'static',
- 'address': '10.210.1.217/24'},
- {'type': 'static',
- 'address': '10.210.1.151/24'}],
- 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]}
+ "version": 1,
+ "config": [
+ {
+ "name": "net0",
+ "type": "physical",
+ "subnets": [
+ {
+ "type": "static",
+ "gateway": "8.12.42.1",
+ "address": "8.12.42.51/24",
+ },
+ {"type": "static", "address": "8.12.42.52/24"},
+ ],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:ae:64:51",
+ },
+ {
+ "name": "net1",
+ "type": "physical",
+ "subnets": [
+ {"type": "static", "address": "10.210.1.217/24"},
+ {"type": "static", "address": "10.210.1.151/24"},
+ ],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:bd:4f:9c",
+ },
+ ],
+ }
found = convert_net(SDC_NICS_MIP)
self.assertEqual(expected, found)
def test_convert_with_dns(self):
expected = {
- 'version': 1,
- 'config': [
- {'name': 'net0', 'type': 'physical',
- 'subnets': [{'type': 'static', 'gateway': '8.12.42.1',
- 'address': '8.12.42.51/24'}],
- 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'},
- {'name': 'net1', 'type': 'physical',
- 'subnets': [{'type': 'dhcp4'}],
- 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'},
- {'type': 'nameserver',
- 'address': ['8.8.8.8', '8.8.8.1'], 'search': ["local"]}]}
+ "version": 1,
+ "config": [
+ {
+ "name": "net0",
+ "type": "physical",
+ "subnets": [
+ {
+ "type": "static",
+ "gateway": "8.12.42.1",
+ "address": "8.12.42.51/24",
+ }
+ ],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:ae:64:51",
+ },
+ {
+ "name": "net1",
+ "type": "physical",
+ "subnets": [{"type": "dhcp4"}],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:bd:4f:9c",
+ },
+ {
+ "type": "nameserver",
+ "address": ["8.8.8.8", "8.8.8.1"],
+ "search": ["local"],
+ },
+ ],
+ }
found = convert_net(
- network_data=SDC_NICS_DHCP, dns_servers=['8.8.8.8', '8.8.8.1'],
- dns_domain="local")
+ network_data=SDC_NICS_DHCP,
+ dns_servers=["8.8.8.8", "8.8.8.1"],
+ dns_domain="local",
+ )
self.assertEqual(expected, found)
def test_convert_simple_multi_ipv6(self):
expected = {
- 'version': 1,
- 'config': [
- {'name': 'net0', 'type': 'physical',
- 'subnets': [{'type': 'static', 'address':
- '2001:4800:78ff:1b:be76:4eff:fe06:96b3/64'},
- {'type': 'static', 'gateway': '8.12.42.1',
- 'address': '8.12.42.51/24'}],
- 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'},
- {'name': 'net1', 'type': 'physical',
- 'subnets': [{'type': 'static',
- 'address': '10.210.1.217/24'}],
- 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]}
+ "version": 1,
+ "config": [
+ {
+ "name": "net0",
+ "type": "physical",
+ "subnets": [
+ {
+ "type": "static",
+ "address": (
+ "2001:4800:78ff:1b:be76:4eff:fe06:96b3/64"
+ ),
+ },
+ {
+ "type": "static",
+ "gateway": "8.12.42.1",
+ "address": "8.12.42.51/24",
+ },
+ ],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:ae:64:51",
+ },
+ {
+ "name": "net1",
+ "type": "physical",
+ "subnets": [
+ {"type": "static", "address": "10.210.1.217/24"}
+ ],
+ "mtu": 1500,
+ "mac_address": "90:b8:d0:bd:4f:9c",
+ },
+ ],
+ }
found = convert_net(SDC_NICS_MIP_IPV6)
self.assertEqual(expected, found)
def test_convert_simple_both_ipv4_ipv6(self):
expected = {
- 'version': 1,
- 'config': [
- {'mac_address': '90:b8:d0:ae:64:51', 'mtu': 1500,
- 'name': 'net0', 'type': 'physical',
- 'subnets': [{'address': '2001::10/64', 'gateway': '2001::1',
- 'type': 'static'},
- {'address': '8.12.42.51/24',
- 'gateway': '8.12.42.1',
- 'type': 'static'},
- {'address': '2001::11/64', 'type': 'static'},
- {'address': '8.12.42.52/32', 'type': 'static'}]},
- {'mac_address': '90:b8:d0:bd:4f:9c', 'mtu': 1500,
- 'name': 'net1', 'type': 'physical',
- 'subnets': [{'address': '10.210.1.217/24',
- 'type': 'static'}]}]}
+ "version": 1,
+ "config": [
+ {
+ "mac_address": "90:b8:d0:ae:64:51",
+ "mtu": 1500,
+ "name": "net0",
+ "type": "physical",
+ "subnets": [
+ {
+ "address": "2001::10/64",
+ "gateway": "2001::1",
+ "type": "static",
+ },
+ {
+ "address": "8.12.42.51/24",
+ "gateway": "8.12.42.1",
+ "type": "static",
+ },
+ {"address": "2001::11/64", "type": "static"},
+ {"address": "8.12.42.52/32", "type": "static"},
+ ],
+ },
+ {
+ "mac_address": "90:b8:d0:bd:4f:9c",
+ "mtu": 1500,
+ "name": "net1",
+ "type": "physical",
+ "subnets": [
+ {"address": "10.210.1.217/24", "type": "static"}
+ ],
+ },
+ ],
+ }
found = convert_net(SDC_NICS_IPV4_IPV6)
self.assertEqual(expected, found)
def test_gateways_not_on_all_nics(self):
expected = {
- 'version': 1,
- 'config': [
- {'mac_address': '90:b8:d0:d8:82:b4', 'mtu': 1500,
- 'name': 'net0', 'type': 'physical',
- 'subnets': [{'address': '8.12.42.26/24',
- 'gateway': '8.12.42.1', 'type': 'static'}]},
- {'mac_address': '90:b8:d0:0a:51:31', 'mtu': 1500,
- 'name': 'net1', 'type': 'physical',
- 'subnets': [{'address': '10.210.1.27/24',
- 'type': 'static'}]}]}
+ "version": 1,
+ "config": [
+ {
+ "mac_address": "90:b8:d0:d8:82:b4",
+ "mtu": 1500,
+ "name": "net0",
+ "type": "physical",
+ "subnets": [
+ {
+ "address": "8.12.42.26/24",
+ "gateway": "8.12.42.1",
+ "type": "static",
+ }
+ ],
+ },
+ {
+ "mac_address": "90:b8:d0:0a:51:31",
+ "mtu": 1500,
+ "name": "net1",
+ "type": "physical",
+ "subnets": [
+ {"address": "10.210.1.27/24", "type": "static"}
+ ],
+ },
+ ],
+ }
found = convert_net(SDC_NICS_SINGLE_GATEWAY)
self.assertEqual(expected, found)
def test_routes_on_all_nics(self):
routes = [
- {'linklocal': False, 'dst': '3.0.0.0/8', 'gateway': '8.12.42.3'},
- {'linklocal': False, 'dst': '4.0.0.0/8', 'gateway': '10.210.1.4'}]
+ {"linklocal": False, "dst": "3.0.0.0/8", "gateway": "8.12.42.3"},
+ {"linklocal": False, "dst": "4.0.0.0/8", "gateway": "10.210.1.4"},
+ ]
expected = {
- 'version': 1,
- 'config': [
- {'mac_address': '90:b8:d0:d8:82:b4', 'mtu': 1500,
- 'name': 'net0', 'type': 'physical',
- 'subnets': [{'address': '8.12.42.26/24',
- 'gateway': '8.12.42.1', 'type': 'static',
- 'routes': [{'network': '3.0.0.0/8',
- 'gateway': '8.12.42.3'},
- {'network': '4.0.0.0/8',
- 'gateway': '10.210.1.4'}]}]},
- {'mac_address': '90:b8:d0:0a:51:31', 'mtu': 1500,
- 'name': 'net1', 'type': 'physical',
- 'subnets': [{'address': '10.210.1.27/24', 'type': 'static',
- 'routes': [{'network': '3.0.0.0/8',
- 'gateway': '8.12.42.3'},
- {'network': '4.0.0.0/8',
- 'gateway': '10.210.1.4'}]}]}]}
+ "version": 1,
+ "config": [
+ {
+ "mac_address": "90:b8:d0:d8:82:b4",
+ "mtu": 1500,
+ "name": "net0",
+ "type": "physical",
+ "subnets": [
+ {
+ "address": "8.12.42.26/24",
+ "gateway": "8.12.42.1",
+ "type": "static",
+ "routes": [
+ {
+ "network": "3.0.0.0/8",
+ "gateway": "8.12.42.3",
+ },
+ {
+ "network": "4.0.0.0/8",
+ "gateway": "10.210.1.4",
+ },
+ ],
+ }
+ ],
+ },
+ {
+ "mac_address": "90:b8:d0:0a:51:31",
+ "mtu": 1500,
+ "name": "net1",
+ "type": "physical",
+ "subnets": [
+ {
+ "address": "10.210.1.27/24",
+ "type": "static",
+ "routes": [
+ {
+ "network": "3.0.0.0/8",
+ "gateway": "8.12.42.3",
+ },
+ {
+ "network": "4.0.0.0/8",
+ "gateway": "10.210.1.4",
+ },
+ ],
+ }
+ ],
+ },
+ ],
+ }
found = convert_net(SDC_NICS_SINGLE_GATEWAY, routes=routes)
self.maxDiff = None
self.assertEqual(expected, found)
-@unittest.skipUnless(get_smartos_environ() == SMARTOS_ENV_KVM,
- "Only supported on KVM and bhyve guests under SmartOS")
-@unittest.skipUnless(os.access(SERIAL_DEVICE, os.W_OK),
- "Requires write access to " + SERIAL_DEVICE)
+@unittest.skipUnless(
+ get_smartos_environ() == SMARTOS_ENV_KVM,
+ "Only supported on KVM and bhyve guests under SmartOS",
+)
+@unittest.skipUnless(
+ os.access(SERIAL_DEVICE, os.W_OK),
+ "Requires write access to " + SERIAL_DEVICE,
+)
@unittest.skipUnless(HAS_PYSERIAL is True, "pyserial not available")
class TestSerialConcurrency(CiTestCase):
"""
- This class tests locking on an actual serial port, and as such can only
- be run in a kvm or bhyve guest running on a SmartOS host. A test run on
- a metadata socket will not be valid because a metadata socket ensures
- there is only one session over a connection. In contrast, in the
- absence of proper locking multiple processes opening the same serial
- port can corrupt each others' exchanges with the metadata server.
-
- This takes on the order of 2 to 3 minutes to run.
+ This class tests locking on an actual serial port, and as such can only
+ be run in a kvm or bhyve guest running on a SmartOS host. A test run on
+ a metadata socket will not be valid because a metadata socket ensures
+ there is only one session over a connection. In contrast, in the
+    absence of proper locking, multiple processes opening the same serial
+    port can corrupt each other's exchanges with the metadata server.
+
+ This takes on the order of 2 to 3 minutes to run.
"""
- allowed_subp = ['mdata-get']
+
+ allowed_subp = ["mdata-get"]
def setUp(self):
self.mdata_proc = multiprocessing.Process(target=self.start_mdata_loop)
@@ -1124,16 +1385,16 @@ class TestSerialConcurrency(CiTestCase):
def start_mdata_loop(self):
"""
- The mdata-get command is repeatedly run in a separate process so
- that it may try to race with metadata operations performed in the
- main test process. Use of mdata-get is better than two processes
- using the protocol implementation in DataSourceSmartOS because we
- are testing to be sure that cloud-init and mdata-get respect each
- others locks.
+ The mdata-get command is repeatedly run in a separate process so
+ that it may try to race with metadata operations performed in the
+ main test process. Use of mdata-get is better than two processes
+ using the protocol implementation in DataSourceSmartOS because we
+ are testing to be sure that cloud-init and mdata-get respect each
+    other's locks.
"""
rcs = list(range(0, 256))
while True:
- subp(['mdata-get', 'sdc:routes'], rcs=rcs)
+ subp(["mdata-get", "sdc:routes"], rcs=rcs)
def test_all_keys(self):
self.assertIsNotNone(self.mdata_proc.pid)
@@ -1156,4 +1417,5 @@ class TestSerialConcurrency(CiTestCase):
self.assertIsNone(self.mdata_proc.exitcode)
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_upcloud.py b/tests/unittests/sources/test_upcloud.py
new file mode 100644
index 00000000..e1125b65
--- /dev/null
+++ b/tests/unittests/sources/test_upcloud.py
@@ -0,0 +1,331 @@
+# Author: Antti Myyrä <antti.myyra@upcloud.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import json
+
+from cloudinit import helpers, settings, sources
+from cloudinit.sources.DataSourceUpCloud import (
+ DataSourceUpCloud,
+ DataSourceUpCloudLocal,
+)
+from tests.unittests.helpers import CiTestCase, mock
+
+UC_METADATA = json.loads(
+ """
+{
+ "cloud_name": "upcloud",
+ "instance_id": "00322b68-0096-4042-9406-faad61922128",
+ "hostname": "test.example.com",
+ "platform": "servers",
+ "subplatform": "metadata (http://169.254.169.254)",
+ "public_keys": [
+ "ssh-rsa AAAAB.... test1@example.com",
+ "ssh-rsa AAAAB.... test2@example.com"
+ ],
+ "region": "fi-hel2",
+ "network": {
+ "interfaces": [
+ {
+ "index": 1,
+ "ip_addresses": [
+ {
+ "address": "94.237.105.53",
+ "dhcp": true,
+ "dns": [
+ "94.237.127.9",
+ "94.237.40.9"
+ ],
+ "family": "IPv4",
+ "floating": false,
+ "gateway": "94.237.104.1",
+ "network": "94.237.104.0/22"
+ },
+ {
+ "address": "94.237.105.50",
+ "dhcp": false,
+ "dns": null,
+ "family": "IPv4",
+ "floating": true,
+ "gateway": "",
+ "network": "94.237.105.50/32"
+ }
+ ],
+ "mac": "3a:d6:ba:4a:36:e7",
+ "network_id": "031457f4-0f8c-483c-96f2-eccede02909c",
+ "type": "public"
+ },
+ {
+ "index": 2,
+ "ip_addresses": [
+ {
+ "address": "10.6.3.27",
+ "dhcp": true,
+ "dns": null,
+ "family": "IPv4",
+ "floating": false,
+ "gateway": "10.6.0.1",
+ "network": "10.6.0.0/22"
+ }
+ ],
+ "mac": "3a:d6:ba:4a:84:cc",
+ "network_id": "03d82553-5bea-4132-b29a-e1cf67ec2dd1",
+ "type": "utility"
+ },
+ {
+ "index": 3,
+ "ip_addresses": [
+ {
+ "address": "2a04:3545:1000:720:38d6:baff:fe4a:63e7",
+ "dhcp": true,
+ "dns": [
+ "2a04:3540:53::1",
+ "2a04:3544:53::1"
+ ],
+ "family": "IPv6",
+ "floating": false,
+ "gateway": "2a04:3545:1000:720::1",
+ "network": "2a04:3545:1000:720::/64"
+ }
+ ],
+ "mac": "3a:d6:ba:4a:63:e7",
+ "network_id": "03000000-0000-4000-8046-000000000000",
+ "type": "public"
+ },
+ {
+ "index": 4,
+ "ip_addresses": [
+ {
+ "address": "172.30.1.10",
+ "dhcp": true,
+ "dns": null,
+ "family": "IPv4",
+ "floating": false,
+ "gateway": "172.30.1.1",
+ "network": "172.30.1.0/24"
+ }
+ ],
+ "mac": "3a:d6:ba:4a:8a:e1",
+ "network_id": "035a0a4a-7704-4de5-820d-189fc8132714",
+ "type": "private"
+ }
+ ],
+ "dns": [
+ "94.237.127.9",
+ "94.237.40.9"
+ ]
+ },
+ "storage": {
+ "disks": [
+ {
+ "id": "014efb65-223b-4d44-8f0a-c29535b88dcf",
+ "serial": "014efb65223b4d448f0a",
+ "size": 10240,
+ "type": "disk",
+ "tier": "maxiops"
+ }
+ ]
+ },
+ "tags": [],
+ "user_data": "",
+ "vendor_data": ""
+}
+"""
+)
+
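+# JSON cannot carry bytes, so user_data is overridden after parsing with the
+# raw cloud-config payload the userdata assertions compare against.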
+UC_METADATA[
+ "user_data"
+] = b"""#cloud-config
+runcmd:
+- [touch, /root/cloud-init-worked ]
+"""
+
+MD_URL = "http://169.254.169.254/metadata/v1.json"
+
+
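+# Stand-in for the sysinfo/DMI lookup: returns (found_on_upcloud, server_uuid);
+# the UUID matches the instance_id in UC_METADATA above.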
+def _mock_dmi():
+ return True, "00322b68-0096-4042-9406-faad61922128"
+
+
+class TestUpCloudMetadata(CiTestCase):
+ """
+ Test reading the meta-data
+ """
+
+ def setUp(self):
+ super(TestUpCloudMetadata, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def get_ds(self, get_sysinfo=_mock_dmi):
+ ds = DataSourceUpCloud(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ if get_sysinfo:
+ ds._get_sysinfo = get_sysinfo
+ return ds
+
+ @mock.patch("cloudinit.sources.helpers.upcloud.read_sysinfo")
+ def test_returns_false_not_on_upcloud(self, m_read_sysinfo):
+ m_read_sysinfo.return_value = (False, None)
+ ds = self.get_ds(get_sysinfo=None)
+ self.assertEqual(False, ds.get_data())
+ self.assertTrue(m_read_sysinfo.called)
+
+ @mock.patch("cloudinit.sources.helpers.upcloud.read_metadata")
+ def test_metadata(self, mock_readmd):
+ mock_readmd.return_value = UC_METADATA.copy()
+
+ ds = self.get_ds()
+ ds.perform_dhcp_setup = False
+
+ ret = ds.get_data()
+ self.assertTrue(ret)
+
+ self.assertTrue(mock_readmd.called)
+
+ self.assertEqual(UC_METADATA.get("user_data"), ds.get_userdata_raw())
+ self.assertEqual(
+ UC_METADATA.get("vendor_data"), ds.get_vendordata_raw()
+ )
+ self.assertEqual(UC_METADATA.get("region"), ds.availability_zone)
+ self.assertEqual(UC_METADATA.get("instance_id"), ds.get_instance_id())
+ self.assertEqual(UC_METADATA.get("cloud_name"), ds.cloud_name)
+
+ self.assertEqual(
+ UC_METADATA.get("public_keys"), ds.get_public_ssh_keys()
+ )
+ self.assertIsInstance(ds.get_public_ssh_keys(), list)
+
+
+class TestUpCloudNetworkSetup(CiTestCase):
+ """
+    Test reading the meta-data in a networked context
+ """
+
+ def setUp(self):
+ super(TestUpCloudNetworkSetup, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def get_ds(self, get_sysinfo=_mock_dmi):
+ ds = DataSourceUpCloudLocal(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+ if get_sysinfo:
+ ds._get_sysinfo = get_sysinfo
+ return ds
+
+ @mock.patch("cloudinit.sources.helpers.upcloud.read_metadata")
+ @mock.patch("cloudinit.net.find_fallback_nic")
+ @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ @mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network")
+ def test_network_configured_metadata(
+ self, m_net, m_dhcp, m_fallback_nic, mock_readmd
+ ):
+ mock_readmd.return_value = UC_METADATA.copy()
+
+ m_fallback_nic.return_value = "eth1"
+ m_dhcp.return_value = [
+ {
+ "interface": "eth1",
+ "fixed-address": "10.6.3.27",
+ "routers": "10.6.0.1",
+ "subnet-mask": "22",
+ "broadcast-address": "10.6.3.255",
+ }
+ ]
+
+ ds = self.get_ds()
+
+ ret = ds.get_data()
+ self.assertTrue(ret)
+
+ self.assertTrue(m_dhcp.called)
+ m_dhcp.assert_called_with("eth1", None)
+
+ m_net.assert_called_once_with(
+ broadcast="10.6.3.255",
+ interface="eth1",
+ ip="10.6.3.27",
+ prefix_or_mask="22",
+ router="10.6.0.1",
+ static_routes=None,
+ )
+
+ self.assertTrue(mock_readmd.called)
+
+ self.assertEqual(UC_METADATA.get("region"), ds.availability_zone)
+ self.assertEqual(UC_METADATA.get("instance_id"), ds.get_instance_id())
+ self.assertEqual(UC_METADATA.get("cloud_name"), ds.cloud_name)
+
+ @mock.patch("cloudinit.sources.helpers.upcloud.read_metadata")
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
+ def test_network_configuration(self, m_get_by_mac, mock_readmd):
+ mock_readmd.return_value = UC_METADATA.copy()
+
+ raw_ifaces = UC_METADATA.get("network").get("interfaces")
+ self.assertEqual(4, len(raw_ifaces))
+
+ m_get_by_mac.return_value = {
+ raw_ifaces[0].get("mac"): "eth0",
+ raw_ifaces[1].get("mac"): "eth1",
+ raw_ifaces[2].get("mac"): "eth2",
+ raw_ifaces[3].get("mac"): "eth3",
+ }
+
+ ds = self.get_ds()
+ ds.perform_dhcp_setup = False
+
+ ret = ds.get_data()
+ self.assertTrue(ret)
+
+ self.assertTrue(mock_readmd.called)
+
+ netcfg = ds.network_config
+
+ self.assertEqual(1, netcfg.get("version"))
+
+ config = netcfg.get("config")
+ self.assertIsInstance(config, list)
+ self.assertEqual(5, len(config))
+ self.assertEqual("physical", config[3].get("type"))
+
+ self.assertEqual(
+ raw_ifaces[2].get("mac"), config[2].get("mac_address")
+ )
+ self.assertEqual(1, len(config[2].get("subnets")))
+ self.assertEqual(
+ "ipv6_dhcpv6-stateless", config[2].get("subnets")[0].get("type")
+ )
+
+ self.assertEqual(2, len(config[0].get("subnets")))
+ self.assertEqual("static", config[0].get("subnets")[1].get("type"))
+
+ dns = config[4]
+ self.assertEqual("nameserver", dns.get("type"))
+ self.assertEqual(2, len(dns.get("address")))
+ self.assertEqual(
+ UC_METADATA.get("network").get("dns")[1], dns.get("address")[1]
+ )
+
+
+class TestUpCloudDatasourceLoading(CiTestCase):
+ def test_get_datasource_list_returns_in_local(self):
+ deps = (sources.DEP_FILESYSTEM,)
+ ds_list = sources.DataSourceUpCloud.get_datasource_list(deps)
+ self.assertEqual(ds_list, [DataSourceUpCloudLocal])
+
+ def test_get_datasource_list_returns_in_normal(self):
+ deps = (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)
+ ds_list = sources.DataSourceUpCloud.get_datasource_list(deps)
+ self.assertEqual(ds_list, [DataSourceUpCloud])
+
+ def test_list_sources_finds_ds(self):
+ found = sources.list_sources(
+ ["UpCloud"],
+ (sources.DEP_FILESYSTEM, sources.DEP_NETWORK),
+ ["cloudinit.sources"],
+ )
+ self.assertEqual([DataSourceUpCloud], found)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_vmware.py b/tests/unittests/sources/test_vmware.py
new file mode 100644
index 00000000..dd331349
--- /dev/null
+++ b/tests/unittests/sources/test_vmware.py
@@ -0,0 +1,389 @@
+# Copyright (c) 2021 VMware, Inc. All Rights Reserved.
+#
+# Authors: Andrew Kutz <akutz@vmware.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import base64
+import gzip
+import os
+
+import pytest
+
+from cloudinit import dmi, helpers, safeyaml, settings
+from cloudinit.sources import DataSourceVMware
+from tests.unittests.helpers import (
+ CiTestCase,
+ FilesystemMockingTestCase,
+ mock,
+ populate_dir,
+)
+
+PRODUCT_NAME_FILE_PATH = "/sys/class/dmi/id/product_name"
+PRODUCT_NAME = "VMware7,1"
+PRODUCT_UUID = "82343CED-E4C7-423B-8F6B-0D34D19067AB"
+REROOT_FILES = {
+ DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID,
+ PRODUCT_NAME_FILE_PATH: PRODUCT_NAME,
+}
+
+VMW_MULTIPLE_KEYS = [
+ "ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@vmw.com",
+ "ssh-rsa AAAAB3NzaC1yc2EAAAA... test2@vmw.com",
+]
+VMW_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... test@vmw.com"
+
+VMW_METADATA_YAML = """instance-id: cloud-vm
+local-hostname: cloud-vm
+network:
+ version: 2
+ ethernets:
+ nics:
+ match:
+ name: ens*
+ dhcp4: yes
+"""
+
+VMW_USERDATA_YAML = """## template: jinja
+#cloud-config
+users:
+- default
+"""
+
+VMW_VENDORDATA_YAML = """## template: jinja
+#cloud-config
+runcmd:
+- echo "Hello, world."
+"""
+
+
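+# Auto-applied to every test in this module: report the platform as Linux and
+# force the container/FreeBSD checks to False so platform detection behaves
+# consistently regardless of the host running the tests.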
+@pytest.fixture(autouse=True)
+def common_patches():
+ with mock.patch("cloudinit.util.platform.platform", return_value="Linux"):
+ with mock.patch.multiple(
+ "cloudinit.dmi",
+ is_container=mock.Mock(return_value=False),
+ is_FreeBSD=mock.Mock(return_value=False),
+ ):
+ yield
+
+
+class TestDataSourceVMware(CiTestCase):
+ """
+ Test common functionality that is not transport specific.
+ """
+
+ def setUp(self):
+ super(TestDataSourceVMware, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def test_no_data_access_method(self):
+ ds = get_ds(self.tmp)
+ ds.vmware_rpctool = None
+ ret = ds.get_data()
+ self.assertFalse(ret)
+
+ def test_get_host_info(self):
+ host_info = DataSourceVMware.get_host_info()
+ self.assertTrue(host_info)
+ self.assertTrue(host_info["hostname"])
+ self.assertTrue(host_info["local-hostname"])
+ self.assertTrue(host_info["local_hostname"])
+ self.assertTrue(host_info[DataSourceVMware.LOCAL_IPV4])
+
+
+class TestDataSourceVMwareEnvVars(FilesystemMockingTestCase):
+ """
+ Test the envvar transport.
+ """
+
+ def setUp(self):
+ super(TestDataSourceVMwareEnvVars, self).setUp()
+ self.tmp = self.tmp_dir()
+ os.environ[DataSourceVMware.VMX_GUESTINFO] = "1"
+ self.create_system_files()
+
+ def tearDown(self):
+ del os.environ[DataSourceVMware.VMX_GUESTINFO]
+ return super(TestDataSourceVMwareEnvVars, self).tearDown()
+
+ def create_system_files(self):
+ rootd = self.tmp_dir()
+ populate_dir(
+ rootd,
+ {
+ DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID,
+ },
+ )
+ self.assertTrue(self.reRoot(rootd))
+
+ def assert_get_data_ok(self, m_fn, m_fn_call_count=6):
+ ds = get_ds(self.tmp)
+ ds.vmware_rpctool = None
+ ret = ds.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(m_fn_call_count, m_fn.call_count)
+ self.assertEqual(
+ ds.data_access_method, DataSourceVMware.DATA_ACCESS_METHOD_ENVVAR
+ )
+ return ds
+
+ def assert_metadata(self, metadata, m_fn, m_fn_call_count=6):
+ ds = self.assert_get_data_ok(m_fn, m_fn_call_count)
+ assert_metadata(self, ds, metadata)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_subplatform(self, m_fn):
+ m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""]
+ ds = self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+ self.assertEqual(
+ ds.subplatform,
+ "%s (%s)"
+ % (
+ DataSourceVMware.DATA_ACCESS_METHOD_ENVVAR,
+ DataSourceVMware.get_guestinfo_envvar_key_name("metadata"),
+ ),
+ )
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_data_metadata_only(self, m_fn):
+ m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_data_userdata_only(self, m_fn):
+ m_fn.side_effect = ["", VMW_USERDATA_YAML, "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_data_vendordata_only(self, m_fn):
+ m_fn.side_effect = ["", "", VMW_VENDORDATA_YAML, ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_data_metadata_base64(self, m_fn):
+ data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8"))
+ m_fn.side_effect = [data, "base64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_data_metadata_b64(self, m_fn):
+ data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8"))
+ m_fn.side_effect = [data, "b64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_data_metadata_gzip_base64(self, m_fn):
+ data = VMW_METADATA_YAML.encode("utf-8")
+ data = gzip.compress(data)
+ data = base64.b64encode(data)
+ m_fn.side_effect = [data, "gzip+base64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_get_data_metadata_gz_b64(self, m_fn):
+ data = VMW_METADATA_YAML.encode("utf-8")
+ data = gzip.compress(data)
+ data = base64.b64encode(data)
+ m_fn.side_effect = [data, "gz+b64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_metadata_single_ssh_key(self, m_fn):
+ metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML)
+ metadata["public_keys"] = VMW_SINGLE_KEY
+ metadata_yaml = safeyaml.dumps(metadata)
+ m_fn.side_effect = [metadata_yaml, "", "", ""]
+ self.assert_metadata(metadata, m_fn, m_fn_call_count=4)
+
+ @mock.patch(
+ "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value"
+ )
+ def test_metadata_multiple_ssh_keys(self, m_fn):
+ metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML)
+ metadata["public_keys"] = VMW_MULTIPLE_KEYS
+ metadata_yaml = safeyaml.dumps(metadata)
+ m_fn.side_effect = [metadata_yaml, "", "", ""]
+ self.assert_metadata(metadata, m_fn, m_fn_call_count=4)
+
+
+class TestDataSourceVMwareGuestInfo(FilesystemMockingTestCase):
+ """
+ Test the guestinfo transport on a VMware platform.
+ """
+
+ def setUp(self):
+ super(TestDataSourceVMwareGuestInfo, self).setUp()
+ self.tmp = self.tmp_dir()
+ self.create_system_files()
+
+ def create_system_files(self):
+ rootd = self.tmp_dir()
+ populate_dir(
+ rootd,
+ {
+ DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID,
+ PRODUCT_NAME_FILE_PATH: PRODUCT_NAME,
+ },
+ )
+ self.assertTrue(self.reRoot(rootd))
+
+ def assert_get_data_ok(self, m_fn, m_fn_call_count=6):
+ ds = get_ds(self.tmp)
+ ds.vmware_rpctool = "vmware-rpctool"
+ ret = ds.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(m_fn_call_count, m_fn.call_count)
+ self.assertEqual(
+ ds.data_access_method,
+ DataSourceVMware.DATA_ACCESS_METHOD_GUESTINFO,
+ )
+ return ds
+
+ def assert_metadata(self, metadata, m_fn, m_fn_call_count=6):
+ ds = self.assert_get_data_ok(m_fn, m_fn_call_count)
+ assert_metadata(self, ds, metadata)
+
+ def test_ds_valid_on_vmware_platform(self):
+ system_type = dmi.read_dmi_data("system-product-name")
+ self.assertEqual(system_type, PRODUCT_NAME)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_get_subplatform(self, m_fn):
+ m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""]
+ ds = self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+ self.assertEqual(
+ ds.subplatform,
+ "%s (%s)"
+ % (
+ DataSourceVMware.DATA_ACCESS_METHOD_GUESTINFO,
+ DataSourceVMware.get_guestinfo_key_name("metadata"),
+ ),
+ )
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_get_data_userdata_only(self, m_fn):
+ m_fn.side_effect = ["", VMW_USERDATA_YAML, "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_get_data_vendordata_only(self, m_fn):
+ m_fn.side_effect = ["", "", VMW_VENDORDATA_YAML, ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_metadata_single_ssh_key(self, m_fn):
+ metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML)
+ metadata["public_keys"] = VMW_SINGLE_KEY
+ metadata_yaml = safeyaml.dumps(metadata)
+ m_fn.side_effect = [metadata_yaml, "", "", ""]
+ self.assert_metadata(metadata, m_fn, m_fn_call_count=4)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_metadata_multiple_ssh_keys(self, m_fn):
+ metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML)
+ metadata["public_keys"] = VMW_MULTIPLE_KEYS
+ metadata_yaml = safeyaml.dumps(metadata)
+ m_fn.side_effect = [metadata_yaml, "", "", ""]
+ self.assert_metadata(metadata, m_fn, m_fn_call_count=4)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_get_data_metadata_base64(self, m_fn):
+ data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8"))
+ m_fn.side_effect = [data, "base64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_get_data_metadata_b64(self, m_fn):
+ data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8"))
+ m_fn.side_effect = [data, "b64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_get_data_metadata_gzip_base64(self, m_fn):
+ data = VMW_METADATA_YAML.encode("utf-8")
+ data = gzip.compress(data)
+ data = base64.b64encode(data)
+ m_fn.side_effect = [data, "gzip+base64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_get_data_metadata_gz_b64(self, m_fn):
+ data = VMW_METADATA_YAML.encode("utf-8")
+ data = gzip.compress(data)
+ data = base64.b64encode(data)
+ m_fn.side_effect = [data, "gz+b64", "", ""]
+ self.assert_get_data_ok(m_fn, m_fn_call_count=4)
+
+
+class TestDataSourceVMwareGuestInfo_InvalidPlatform(FilesystemMockingTestCase):
+ """
+ Test the guestinfo transport on a non-VMware platform.
+ """
+
+ def setUp(self):
+ super(TestDataSourceVMwareGuestInfo_InvalidPlatform, self).setUp()
+ self.tmp = self.tmp_dir()
+ self.create_system_files()
+
+ def create_system_files(self):
+ rootd = self.tmp_dir()
+ populate_dir(
+ rootd,
+ {
+ DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID,
+ },
+ )
+ self.assertTrue(self.reRoot(rootd))
+
+ @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value")
+ def test_ds_invalid_on_non_vmware_platform(self, m_fn):
+ system_type = dmi.read_dmi_data("system-product-name")
+ self.assertEqual(system_type, None)
+
+ m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""]
+ ds = get_ds(self.tmp)
+ ds.vmware_rpctool = "vmware-rpctool"
+ ret = ds.get_data()
+ self.assertFalse(ret)
+
+
+def assert_metadata(test_obj, ds, metadata):
+ test_obj.assertEqual(metadata.get("instance-id"), ds.get_instance_id())
+ test_obj.assertEqual(metadata.get("local-hostname"), ds.get_hostname())
+
+ expected_public_keys = metadata.get("public_keys")
+ if not isinstance(expected_public_keys, list):
+ expected_public_keys = [expected_public_keys]
+
+ test_obj.assertEqual(expected_public_keys, ds.get_public_ssh_keys())
+ test_obj.assertIsInstance(ds.get_public_ssh_keys(), list)
+
+
+def get_ds(temp_dir):
+ ds = DataSourceVMware.DataSourceVMware(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": temp_dir})
+ )
+ ds.vmware_rpctool = "vmware-rpctool"
+ return ds
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_vultr.py b/tests/unittests/sources/test_vultr.py
new file mode 100644
index 00000000..18b2c084
--- /dev/null
+++ b/tests/unittests/sources/test_vultr.py
@@ -0,0 +1,339 @@
+# Author: Eric Benner <ebenner@vultr.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+# Vultr Metadata API:
+# https://www.vultr.com/metadata/
+
+import json
+
+from cloudinit import helpers, settings
+from cloudinit.net.dhcp import NoDHCPLeaseError
+from cloudinit.sources import DataSourceVultr
+from cloudinit.sources.helpers import vultr
+from tests.unittests.helpers import CiTestCase, mock
+
+# Vultr metadata test data
+VULTR_V1_1 = {
+ "bgp": {
+ "ipv4": {
+ "my-address": "",
+ "my-asn": "",
+ "peer-address": "",
+ "peer-asn": "",
+ },
+ "ipv6": {
+ "my-address": "",
+ "my-asn": "",
+ "peer-address": "",
+ "peer-asn": "",
+ },
+ },
+ "hostname": "CLOUDINIT_1",
+ "instanceid": "42506325",
+ "interfaces": [
+ {
+ "ipv4": {
+ "additional": [],
+ "address": "108.61.89.242",
+ "gateway": "108.61.89.1",
+ "netmask": "255.255.255.0",
+ },
+ "ipv6": {
+ "additional": [],
+ "address": "2001:19f0:5:56c2:5400:03ff:fe15:c465",
+ "network": "2001:19f0:5:56c2::",
+ "prefix": "64",
+ },
+ "mac": "56:00:03:15:c4:65",
+ "network-type": "public",
+ }
+ ],
+ "public-keys": ["ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key"],
+ "region": {"regioncode": "EWR"},
+ "user-defined": [],
+ "startup-script": "echo No configured startup script",
+ "raid1-script": "",
+ "user-data": [],
+ "vendor-data": [
+ {
+ "package_upgrade": "true",
+ "disable_root": 0,
+ "ssh_pwauth": 1,
+ "chpasswd": {
+ "expire": False,
+ "list": ["root:$6$S2Smuj.../VqxmIR9Urw0jPZ88i4yvB/"],
+ },
+ "system_info": {"default_user": {"name": "root"}},
+ }
+ ],
+}
+
+VULTR_V1_2 = {
+ "bgp": {
+ "ipv4": {
+ "my-address": "",
+ "my-asn": "",
+ "peer-address": "",
+ "peer-asn": "",
+ },
+ "ipv6": {
+ "my-address": "",
+ "my-asn": "",
+ "peer-address": "",
+ "peer-asn": "",
+ },
+ },
+ "hostname": "CLOUDINIT_2",
+ "instance-v2-id": "29bea708-2e6e-480a-90ad-0e6b5d5ad62f",
+ "instanceid": "42872224",
+ "interfaces": [
+ {
+ "ipv4": {
+ "additional": [],
+ "address": "45.76.7.171",
+ "gateway": "45.76.6.1",
+ "netmask": "255.255.254.0",
+ },
+ "ipv6": {
+ "additional": [
+ {"network": "2002:19f0:5:28a7::", "prefix": "64"}
+ ],
+ "address": "2001:19f0:5:28a7:5400:03ff:fe1b:4eca",
+ "network": "2001:19f0:5:28a7::",
+ "prefix": "64",
+ },
+ "mac": "56:00:03:1b:4e:ca",
+ "network-type": "public",
+ },
+ {
+ "ipv4": {
+ "additional": [],
+ "address": "10.1.112.3",
+ "gateway": "",
+ "netmask": "255.255.240.0",
+ },
+ "ipv6": {"additional": [], "network": "", "prefix": ""},
+ "mac": "5a:00:03:1b:4e:ca",
+ "network-type": "private",
+ "network-v2-id": "fbbe2b5b-b986-4396-87f5-7246660ccb64",
+ "networkid": "net5e7155329d730",
+ },
+ ],
+ "public-keys": ["ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key"],
+ "region": {"regioncode": "EWR"},
+ "user-defined": [],
+ "startup-script": "echo No configured startup script",
+ "user-data": [],
+ "vendor-data": [
+ {
+ "package_upgrade": "true",
+ "disable_root": 0,
+ "ssh_pwauth": 1,
+ "chpasswd": {
+ "expire": False,
+ "list": ["root:$6$SxXx...k2mJNIzZB5vMCDBlYT1"],
+ },
+ "system_info": {"default_user": {"name": "root"}},
+ }
+ ],
+}
+
+SSH_KEYS_1 = ["ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key"]
+
+INTERFACES = [
+ ["lo", "56:00:03:15:c4:00", "drv", "devid0"],
+ ["dummy0", "56:00:03:15:c4:01", "drv", "devid1"],
+ ["eth1", "56:00:03:15:c4:02", "drv", "devid2"],
+ ["eth0", "56:00:03:15:c4:04", "drv", "devid4"],
+ ["eth2", "56:00:03:15:c4:03", "drv", "devid3"],
+]
+
+# Expected generated objects
+
+# Expected config
+EXPECTED_VULTR_CONFIG = {
+ "package_upgrade": "true",
+ "disable_root": 0,
+ "ssh_pwauth": 1,
+ "chpasswd": {
+ "expire": False,
+ "list": ["root:$6$SxXx...k2mJNIzZB5vMCDBlYT1"],
+ },
+ "system_info": {"default_user": {"name": "root"}},
+}
+
+# Expected network config object from generator
+EXPECTED_VULTR_NETWORK_1 = {
+ "version": 1,
+ "config": [
+ {"type": "nameserver", "address": ["108.61.10.10"]},
+ {
+ "name": "eth0",
+ "type": "physical",
+ "mac_address": "56:00:03:15:c4:65",
+ "accept-ra": 1,
+ "subnets": [
+ {"type": "dhcp", "control": "auto"},
+ {"type": "ipv6_slaac", "control": "auto"},
+ ],
+ },
+ ],
+}
+
+EXPECTED_VULTR_NETWORK_2 = {
+ "version": 1,
+ "config": [
+ {"type": "nameserver", "address": ["108.61.10.10"]},
+ {
+ "name": "eth0",
+ "type": "physical",
+ "mac_address": "56:00:03:1b:4e:ca",
+ "accept-ra": 1,
+ "subnets": [
+ {"type": "dhcp", "control": "auto"},
+ {"type": "ipv6_slaac", "control": "auto"},
+ {
+ "type": "static6",
+ "control": "auto",
+ "address": "2002:19f0:5:28a7::/64",
+ },
+ ],
+ },
+ {
+ "name": "eth1",
+ "type": "physical",
+ "mac_address": "5a:00:03:1b:4e:ca",
+ "subnets": [
+ {
+ "type": "static",
+ "control": "auto",
+ "address": "10.1.112.3",
+ "netmask": "255.255.240.0",
+ }
+ ],
+ },
+ ],
+}
+
+
+INTERFACE_MAP = {
+ "56:00:03:15:c4:65": "eth0",
+ "56:00:03:1b:4e:ca": "eth0",
+ "5a:00:03:1b:4e:ca": "eth1",
+}
+
+
+EPHERMERAL_USED = ""
+
+
+class TestDataSourceVultr(CiTestCase):
+ def setUp(self):
+ super(TestDataSourceVultr, self).setUp()
+
+ # Stored as a dict to make it easier to maintain
+ raw1 = json.dumps(VULTR_V1_1["vendor-data"][0])
+ raw2 = json.dumps(VULTR_V1_2["vendor-data"][0])
+
+ # Make expected format
+ VULTR_V1_1["vendor-data"] = [raw1]
+ VULTR_V1_2["vendor-data"] = [raw2]
+
+ self.tmp = self.tmp_dir()
+
+ # Test the datasource itself
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
+ @mock.patch("cloudinit.sources.helpers.vultr.is_vultr")
+ @mock.patch("cloudinit.sources.helpers.vultr.get_metadata")
+ def test_datasource(self, mock_getmeta, mock_isvultr, mock_netmap):
+ mock_getmeta.return_value = VULTR_V1_2
+ mock_isvultr.return_value = True
+ mock_netmap.return_value = INTERFACE_MAP
+
+ source = DataSourceVultr.DataSourceVultr(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+
+        # Test that data retrieval succeeds
+ self.assertEqual(True, source._get_data())
+
+ # Test instance id
+ self.assertEqual("42872224", source.metadata["instanceid"])
+
+ # Test hostname
+ self.assertEqual("CLOUDINIT_2", source.metadata["local-hostname"])
+
+ # Test ssh keys
+ self.assertEqual(SSH_KEYS_1, source.metadata["public-keys"])
+
+ # Test vendor data generation
+ orig_val = self.maxDiff
+ self.maxDiff = None
+
+ vendordata = source.vendordata_raw
+
+ # Test vendor config
+ self.assertEqual(
+ EXPECTED_VULTR_CONFIG,
+ json.loads(vendordata[0].replace("#cloud-config", "")),
+ )
+
+ self.maxDiff = orig_val
+
+ # Test network config generation
+ self.assertEqual(EXPECTED_VULTR_NETWORK_2, source.network_config)
+
+ # Test network config generation
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
+ def test_network_config(self, mock_netmap):
+ mock_netmap.return_value = INTERFACE_MAP
+ interf = VULTR_V1_1["interfaces"]
+
+ self.assertEqual(
+ EXPECTED_VULTR_NETWORK_1, vultr.generate_network_config(interf)
+ )
+
+ # Test Private Networking config generation
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
+ def test_private_network_config(self, mock_netmap):
+ mock_netmap.return_value = INTERFACE_MAP
+ interf = VULTR_V1_2["interfaces"]
+
+ self.assertEqual(
+ EXPECTED_VULTR_NETWORK_2, vultr.generate_network_config(interf)
+ )
+
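+    # Stand-in for EphemeralDHCPv4.__init__: records the interface the
+    # datasource attempts DHCP on and raises NoDHCPLeaseError for anything
+    # other than eth0.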
+ def ephemeral_init(self, iface="", connectivity_url_data=None):
+ global EPHERMERAL_USED
+ EPHERMERAL_USED = iface
+ if iface == "eth0":
+ return
+ raise NoDHCPLeaseError("Generic for testing")
+
+ # Test interface seeking to ensure we are able to find the correct one
+ @mock.patch("cloudinit.net.dhcp.EphemeralDHCPv4.__init__", ephemeral_init)
+ @mock.patch("cloudinit.sources.helpers.vultr.is_vultr")
+ @mock.patch("cloudinit.sources.helpers.vultr.read_metadata")
+ @mock.patch("cloudinit.net.get_interfaces")
+ def test_interface_seek(
+ self, mock_get_interfaces, mock_read_metadata, mock_isvultr
+ ):
+ mock_read_metadata.side_effect = NoDHCPLeaseError(
+ "Generic for testing"
+ )
+ mock_isvultr.return_value = True
+ mock_get_interfaces.return_value = INTERFACES
+
+ source = DataSourceVultr.DataSourceVultr(
+ settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp})
+ )
+
+ try:
+ source._get_data()
+ except Exception:
+ pass
+
+ self.assertEqual(EPHERMERAL_USED, INTERFACES[3][0])
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_vmware/__init__.py b/tests/unittests/sources/vmware/__init__.py
index e69de29b..e69de29b 100644
--- a/tests/unittests/test_vmware/__init__.py
+++ b/tests/unittests/sources/vmware/__init__.py
diff --git a/tests/unittests/test_vmware/test_custom_script.py b/tests/unittests/sources/vmware/test_custom_script.py
index f89f8157..9b3e079f 100644
--- a/tests/unittests/test_vmware/test_custom_script.py
+++ b/tests/unittests/sources/vmware/test_custom_script.py
@@ -7,14 +7,15 @@
import os
import stat
+
from cloudinit import util
from cloudinit.sources.helpers.vmware.imc.config_custom_script import (
CustomScriptConstant,
CustomScriptNotFound,
- PreCustomScript,
PostCustomScript,
+ PreCustomScript,
)
-from cloudinit.tests.helpers import CiTestCase, mock
+from tests.unittests.helpers import CiTestCase, mock
class TestVmwareCustomScript(CiTestCase):
@@ -22,8 +23,7 @@ class TestVmwareCustomScript(CiTestCase):
self.tmpDir = self.tmp_dir()
# Mock the tmpDir as the root dir in VM.
self.execDir = os.path.join(self.tmpDir, ".customization")
- self.execScript = os.path.join(self.execDir,
- ".customize.sh")
+ self.execScript = os.path.join(self.execDir, ".customize.sh")
def test_prepare_custom_script(self):
"""
@@ -36,23 +36,24 @@ class TestVmwareCustomScript(CiTestCase):
preCust = PreCustomScript("random-vmw-test", self.tmpDir)
self.assertEqual("random-vmw-test", preCust.scriptname)
self.assertEqual(self.tmpDir, preCust.directory)
- self.assertEqual(self.tmp_path("random-vmw-test", self.tmpDir),
- preCust.scriptpath)
+ self.assertEqual(
+ self.tmp_path("random-vmw-test", self.tmpDir), preCust.scriptpath
+ )
with self.assertRaises(CustomScriptNotFound):
preCust.prepare_script()
# Custom script exists.
custScript = self.tmp_path("test-cust", self.tmpDir)
util.write_file(custScript, "test-CR-strip\r\r")
- with mock.patch.object(CustomScriptConstant,
- "CUSTOM_TMP_DIR",
- self.execDir):
- with mock.patch.object(CustomScriptConstant,
- "CUSTOM_SCRIPT",
- self.execScript):
- postCust = PostCustomScript("test-cust",
- self.tmpDir,
- self.tmpDir)
+ with mock.patch.object(
+ CustomScriptConstant, "CUSTOM_TMP_DIR", self.execDir
+ ):
+ with mock.patch.object(
+ CustomScriptConstant, "CUSTOM_SCRIPT", self.execScript
+ ):
+ postCust = PostCustomScript(
+ "test-cust", self.tmpDir, self.tmpDir
+ )
self.assertEqual("test-cust", postCust.scriptname)
self.assertEqual(self.tmpDir, postCust.directory)
self.assertEqual(custScript, postCust.scriptpath)
@@ -84,26 +85,30 @@ class TestVmwareCustomScript(CiTestCase):
ccScriptDir = self.tmp_dir()
ccScript = os.path.join(ccScriptDir, "post-customize-guest.sh")
markerFile = os.path.join(self.tmpDir, ".markerFile")
- with mock.patch.object(CustomScriptConstant,
- "CUSTOM_TMP_DIR",
- self.execDir):
- with mock.patch.object(CustomScriptConstant,
- "CUSTOM_SCRIPT",
- self.execScript):
- with mock.patch.object(CustomScriptConstant,
- "POST_CUSTOM_PENDING_MARKER",
- markerFile):
- postCust = PostCustomScript("test-cust",
- self.tmpDir,
- ccScriptDir)
+ with mock.patch.object(
+ CustomScriptConstant, "CUSTOM_TMP_DIR", self.execDir
+ ):
+ with mock.patch.object(
+ CustomScriptConstant, "CUSTOM_SCRIPT", self.execScript
+ ):
+ with mock.patch.object(
+ CustomScriptConstant,
+ "POST_CUSTOM_PENDING_MARKER",
+ markerFile,
+ ):
+ postCust = PostCustomScript(
+ "test-cust", self.tmpDir, ccScriptDir
+ )
postCust.execute()
# Check cc_scripts_per_instance and marker file
# are created.
self.assertTrue(os.path.exists(ccScript))
with open(ccScript, "r") as f:
content = f.read()
- self.assertEqual(content,
- "This is the script to run post cust")
+ self.assertEqual(
+ content, "This is the script to run post cust"
+ )
self.assertTrue(os.path.exists(markerFile))
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/vmware/test_guestcust_util.py b/tests/unittests/sources/vmware/test_guestcust_util.py
new file mode 100644
index 00000000..fc63bcae
--- /dev/null
+++ b/tests/unittests/sources/vmware/test_guestcust_util.py
@@ -0,0 +1,109 @@
+# Copyright (C) 2019 Canonical Ltd.
+# Copyright (C) 2019 VMware INC.
+#
+# Author: Xiaofeng Wang <xiaofengw@vmware.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import subp
+from cloudinit.sources.helpers.vmware.imc.config import Config
+from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile
+from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
+ get_tools_config,
+ set_gc_status,
+)
+from tests.unittests.helpers import CiTestCase, mock
+
+
+class TestGuestCustUtil(CiTestCase):
+ def test_get_tools_config_not_installed(self):
+ """
+        This test is designed to verify the behavior when vmware-toolbox-cmd
+ is not installed.
+ """
+ with mock.patch.object(subp, "which", return_value=None):
+ self.assertEqual(
+ get_tools_config("section", "key", "defaultVal"), "defaultVal"
+ )
+
+ def test_get_tools_config_internal_exception(self):
+ """
+        This test is designed to verify the behavior when an internal
+        exception is raised.
+ """
+ with mock.patch.object(subp, "which", return_value="/dummy/path"):
+ with mock.patch.object(
+ subp,
+ "subp",
+ return_value=("key=value", b""),
+ side_effect=subp.ProcessExecutionError(
+ "subp failed", exit_code=99
+ ),
+ ):
+ # verify return value is 'defaultVal', not 'value'.
+ self.assertEqual(
+ get_tools_config("section", "key", "defaultVal"),
+ "defaultVal",
+ )
+
+ def test_get_tools_config_normal(self):
+ """
+        This test is designed to verify that the value can be parsed from
+        a "key = value" line in the given [section].
+ """
+ with mock.patch.object(subp, "which", return_value="/dummy/path"):
+ # value is not blank
+ with mock.patch.object(
+ subp, "subp", return_value=("key = value ", b"")
+ ):
+ self.assertEqual(
+ get_tools_config("section", "key", "defaultVal"), "value"
+ )
+ # value is blank
+ with mock.patch.object(subp, "subp", return_value=("key = ", b"")):
+ self.assertEqual(
+ get_tools_config("section", "key", "defaultVal"), ""
+ )
+ # value contains =
+ with mock.patch.object(
+ subp, "subp", return_value=("key=Bar=Wark", b"")
+ ):
+ self.assertEqual(
+ get_tools_config("section", "key", "defaultVal"),
+ "Bar=Wark",
+ )
+
+ # value contains specific characters
+ with mock.patch.object(
+ subp, "subp", return_value=("[a] b.c_d=e-f", b"")
+ ):
+ self.assertEqual(
+ get_tools_config("section", "key", "defaultVal"), "e-f"
+ )
+
+ def test_set_gc_status(self):
+ """
+ This test is designed to verify the behavior of set_gc_status
+ """
+ # config is None, return None
+ self.assertEqual(set_gc_status(None, "Successful"), None)
+
+ # post gc status is NO, return None
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+ conf = Config(cf)
+ self.assertEqual(set_gc_status(conf, "Successful"), None)
+
+ # post gc status is YES, subp is called to execute command
+ cf._insertKey("MISC|POST-GC-STATUS", "YES")
+ conf = Config(cf)
+ with mock.patch.object(
+ subp, "subp", return_value=("ok", b"")
+ ) as mockobj:
+ self.assertEqual(set_gc_status(conf, "Successful"), ("ok", b""))
+ mockobj.assert_called_once_with(
+ ["vmware-rpctool", "info-set guestinfo.gc.status Successful"],
+ rcs=[0],
+ )
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/vmware/test_vmware_config_file.py b/tests/unittests/sources/vmware/test_vmware_config_file.py
new file mode 100644
index 00000000..38d45d0e
--- /dev/null
+++ b/tests/unittests/sources/vmware/test_vmware_config_file.py
@@ -0,0 +1,635 @@
+# Copyright (C) 2015 Canonical Ltd.
+# Copyright (C) 2016 VMware INC.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+# Pengpeng Sun <pengpengs@vmware.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import logging
+import os
+import sys
+import tempfile
+import textwrap
+
+from cloudinit.sources.DataSourceOVF import (
+ get_network_config_from_conf,
+ read_vmware_imc,
+)
+from cloudinit.sources.helpers.vmware.imc.boot_proto import BootProtoEnum
+from cloudinit.sources.helpers.vmware.imc.config import Config
+from cloudinit.sources.helpers.vmware.imc.config_file import (
+ ConfigFile as WrappedConfigFile,
+)
+from cloudinit.sources.helpers.vmware.imc.config_nic import (
+ NicConfigurator,
+ gen_subnet,
+)
+from tests.unittests.helpers import CiTestCase, cloud_init_project_dir
+
+logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
+logger = logging.getLogger(__name__)
+
+
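+# Wrap ConfigFile so the fixture paths below are resolved against the project
+# root via cloud_init_project_dir.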
+def ConfigFile(path: str):
+ return WrappedConfigFile(cloud_init_project_dir(path))
+
+
+class TestVmwareConfigFile(CiTestCase):
+ def test_utility_methods(self):
+ """Tests basic utility methods of ConfigFile class"""
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+
+ cf.clear()
+
+ self.assertEqual(0, len(cf), "clear size")
+
+ cf._insertKey(" PASSWORD|-PASS ", " foo ")
+ cf._insertKey("BAR", " ")
+
+ self.assertEqual(2, len(cf), "insert size")
+ self.assertEqual("foo", cf["PASSWORD|-PASS"], "password")
+ self.assertTrue("PASSWORD|-PASS" in cf, "hasPassword")
+ self.assertFalse(
+ cf.should_keep_current_value("PASSWORD|-PASS"), "keepPassword"
+ )
+ self.assertFalse(
+ cf.should_remove_current_value("PASSWORD|-PASS"), "removePassword"
+ )
+ self.assertFalse("FOO" in cf, "hasFoo")
+ self.assertTrue(cf.should_keep_current_value("FOO"), "keepFoo")
+ self.assertFalse(cf.should_remove_current_value("FOO"), "removeFoo")
+ self.assertTrue("BAR" in cf, "hasBar")
+ self.assertFalse(cf.should_keep_current_value("BAR"), "keepBar")
+ self.assertTrue(cf.should_remove_current_value("BAR"), "removeBar")
+
+ def test_datasource_instance_id(self):
+ """Tests instance id for the DatasourceOVF"""
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+
+ instance_id_prefix = "iid-vmware-"
+
+ conf = Config(cf)
+
+ (md1, _, _) = read_vmware_imc(conf)
+ self.assertIn(instance_id_prefix, md1["instance-id"])
+ self.assertEqual(md1["instance-id"], "iid-vmware-imc")
+
+ (md2, _, _) = read_vmware_imc(conf)
+ self.assertIn(instance_id_prefix, md2["instance-id"])
+ self.assertEqual(md2["instance-id"], "iid-vmware-imc")
+
+ self.assertEqual(md2["instance-id"], md1["instance-id"])
+
+ def test_configfile_static_2nics(self):
+ """Tests Config class for a configuration with two static NICs."""
+ cf = ConfigFile("tests/data/vmware/cust-static-2nic.cfg")
+
+ conf = Config(cf)
+
+ self.assertEqual("myhost1", conf.host_name, "hostName")
+ self.assertEqual("Africa/Abidjan", conf.timezone, "tz")
+ self.assertTrue(conf.utc, "utc")
+
+ self.assertEqual(
+ ["10.20.145.1", "10.20.145.2"], conf.name_servers, "dns"
+ )
+ self.assertEqual(
+ ["eng.vmware.com", "proxy.vmware.com"],
+ conf.dns_suffixes,
+ "suffixes",
+ )
+
+ nics = conf.nics
+ ipv40 = nics[0].staticIpv4
+
+ self.assertEqual(2, len(nics), "nics")
+ self.assertEqual("NIC1", nics[0].name, "nic0")
+ self.assertEqual("00:50:56:a6:8c:08", nics[0].mac, "mac0")
+ self.assertEqual(BootProtoEnum.STATIC, nics[0].bootProto, "bootproto0")
+ self.assertEqual("10.20.87.154", ipv40[0].ip, "ipv4Addr0")
+ self.assertEqual("255.255.252.0", ipv40[0].netmask, "ipv4Mask0")
+ self.assertEqual(2, len(ipv40[0].gateways), "ipv4Gw0")
+ self.assertEqual("10.20.87.253", ipv40[0].gateways[0], "ipv4Gw0_0")
+ self.assertEqual("10.20.87.105", ipv40[0].gateways[1], "ipv4Gw0_1")
+
+ self.assertEqual(1, len(nics[0].staticIpv6), "ipv6Cnt0")
+ self.assertEqual(
+ "fc00:10:20:87::154", nics[0].staticIpv6[0].ip, "ipv6Addr0"
+ )
+
+ self.assertEqual("NIC2", nics[1].name, "nic1")
+ self.assertTrue(not nics[1].staticIpv6, "ipv61 dhcp")
+
+ def test_config_file_dhcp_2nics(self):
+ """Tests Config class for a configuration with two DHCP NICs."""
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+
+ conf = Config(cf)
+ nics = conf.nics
+ self.assertEqual(2, len(nics), "nics")
+ self.assertEqual("NIC1", nics[0].name, "nic0")
+ self.assertEqual("00:50:56:a6:8c:08", nics[0].mac, "mac0")
+ self.assertEqual(BootProtoEnum.DHCP, nics[0].bootProto, "bootproto0")
+
+ def test_config_password(self):
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+
+ cf._insertKey("PASSWORD|-PASS", "test-password")
+ cf._insertKey("PASSWORD|RESET", "no")
+
+ conf = Config(cf)
+ self.assertEqual("test-password", conf.admin_password, "password")
+ self.assertFalse(conf.reset_password, "do not reset password")
+
+ def test_config_reset_passwd(self):
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+
+ cf._insertKey("PASSWORD|-PASS", "test-password")
+ cf._insertKey("PASSWORD|RESET", "random")
+
+ conf = Config(cf)
+ with self.assertRaises(ValueError):
+ pw = conf.reset_password
+ self.assertIsNone(pw)
+
+ cf.clear()
+ cf._insertKey("PASSWORD|RESET", "yes")
+ self.assertEqual(1, len(cf), "insert size")
+
+ conf = Config(cf)
+ self.assertTrue(conf.reset_password, "reset password")
+
+ def test_get_config_nameservers(self):
+ """Tests DNS and nameserver settings in a configuration."""
+ cf = ConfigFile("tests/data/vmware/cust-static-2nic.cfg")
+
+ config = Config(cf)
+
+ network_config = get_network_config_from_conf(config, False)
+
+ self.assertEqual(1, network_config.get("version"))
+
+ config_types = network_config.get("config")
+ name_servers = None
+ dns_suffixes = None
+
+ for type in config_types:
+ if type.get("type") == "nameserver":
+ name_servers = type.get("address")
+ dns_suffixes = type.get("search")
+ break
+
+ self.assertEqual(["10.20.145.1", "10.20.145.2"], name_servers, "dns")
+ self.assertEqual(
+ ["eng.vmware.com", "proxy.vmware.com"], dns_suffixes, "suffixes"
+ )
+
+ def test_gen_subnet(self):
+ """Tests if gen_subnet properly calculates network subnet from
+ IPv4 address and netmask"""
+ ip_subnet_list = [
+ ["10.20.87.253", "255.255.252.0", "10.20.84.0"],
+ ["10.20.92.105", "255.255.252.0", "10.20.92.0"],
+ ["192.168.0.10", "255.255.0.0", "192.168.0.0"],
+ ]
+ for entry in ip_subnet_list:
+ self.assertEqual(
+ entry[2],
+ gen_subnet(entry[0], entry[1]),
+ "Subnet for a specified ip and netmask",
+ )
+
+ def test_get_config_dns_suffixes(self):
+ """Tests if get_network_config_from_conf properly
+ generates nameservers and dns settings from a
+ specified configuration"""
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+
+ config = Config(cf)
+
+ network_config = get_network_config_from_conf(config, False)
+
+ self.assertEqual(1, network_config.get("version"))
+
+ config_types = network_config.get("config")
+ name_servers = None
+ dns_suffixes = None
+
+ for type in config_types:
+ if type.get("type") == "nameserver":
+ name_servers = type.get("address")
+ dns_suffixes = type.get("search")
+ break
+
+ self.assertEqual([], name_servers, "dns")
+ self.assertEqual(["eng.vmware.com"], dns_suffixes, "suffixes")
+
+ def test_get_nics_list_dhcp(self):
+ """Tests if NicConfigurator properly calculates network subnets
+ for a configuration with a list of DHCP NICs"""
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+
+ config = Config(cf)
+
+ nicConfigurator = NicConfigurator(config.nics, False)
+ nics_cfg_list = nicConfigurator.generate()
+
+ self.assertEqual(2, len(nics_cfg_list), "number of config elements")
+
+ nic1 = {"name": "NIC1"}
+ nic2 = {"name": "NIC2"}
+ for cfg in nics_cfg_list:
+ if cfg.get("name") == nic1.get("name"):
+ nic1.update(cfg)
+ elif cfg.get("name") == nic2.get("name"):
+ nic2.update(cfg)
+
+ self.assertEqual("physical", nic1.get("type"), "type of NIC1")
+ self.assertEqual("NIC1", nic1.get("name"), "name of NIC1")
+ self.assertEqual(
+ "00:50:56:a6:8c:08", nic1.get("mac_address"), "mac address of NIC1"
+ )
+ subnets = nic1.get("subnets")
+ self.assertEqual(1, len(subnets), "number of subnets for NIC1")
+ subnet = subnets[0]
+ self.assertEqual("dhcp", subnet.get("type"), "DHCP type for NIC1")
+ self.assertEqual("auto", subnet.get("control"), "NIC1 Control type")
+
+ self.assertEqual("physical", nic2.get("type"), "type of NIC2")
+ self.assertEqual("NIC2", nic2.get("name"), "name of NIC2")
+ self.assertEqual(
+ "00:50:56:a6:5a:de", nic2.get("mac_address"), "mac address of NIC2"
+ )
+ subnets = nic2.get("subnets")
+ self.assertEqual(1, len(subnets), "number of subnets for NIC2")
+ subnet = subnets[0]
+ self.assertEqual("dhcp", subnet.get("type"), "DHCP type for NIC2")
+ self.assertEqual("auto", subnet.get("control"), "NIC2 Control type")
+
+ def test_get_nics_list_static(self):
+ """Tests if NicConfigurator properly calculates network subnets
+ for a configuration with 2 static NICs"""
+ cf = ConfigFile("tests/data/vmware/cust-static-2nic.cfg")
+
+ config = Config(cf)
+
+ nicConfigurator = NicConfigurator(config.nics, False)
+ nics_cfg_list = nicConfigurator.generate()
+
+ self.assertEqual(2, len(nics_cfg_list), "number of elements")
+
+ nic1 = {"name": "NIC1"}
+ nic2 = {"name": "NIC2"}
+ route_list = []
+ for cfg in nics_cfg_list:
+ cfg_type = cfg.get("type")
+ if cfg_type == "physical":
+ if cfg.get("name") == nic1.get("name"):
+ nic1.update(cfg)
+ elif cfg.get("name") == nic2.get("name"):
+ nic2.update(cfg)
+
+ self.assertEqual("physical", nic1.get("type"), "type of NIC1")
+ self.assertEqual("NIC1", nic1.get("name"), "name of NIC1")
+ self.assertEqual(
+ "00:50:56:a6:8c:08", nic1.get("mac_address"), "mac address of NIC1"
+ )
+
+ subnets = nic1.get("subnets")
+ self.assertEqual(2, len(subnets), "Number of subnets")
+
+ static_subnet = []
+ static6_subnet = []
+
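+ # Partition NIC1's subnets by type and collect any attached routes.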
+ for subnet in subnets:
+ subnet_type = subnet.get("type")
+ if subnet_type == "static":
+ static_subnet.append(subnet)
+ elif subnet_type == "static6":
+ static6_subnet.append(subnet)
+ else:
+ self.assertEqual(True, False, "Unknown type")
+ if "route" in subnet:
+ for route in subnet.get("routes"):
+ route_list.append(route)
+
+ self.assertEqual(1, len(static_subnet), "Number of static subnet")
+ self.assertEqual(1, len(static6_subnet), "Number of static6 subnet")
+
+ subnet = static_subnet[0]
+ self.assertEqual(
+ "10.20.87.154",
+ subnet.get("address"),
+ "IPv4 address of static subnet",
+ )
+ self.assertEqual(
+ "255.255.252.0", subnet.get("netmask"), "NetMask of static subnet"
+ )
+ self.assertEqual(
+ "auto", subnet.get("control"), "control for static subnet"
+ )
+
+ subnet = static6_subnet[0]
+ self.assertEqual(
+ "fc00:10:20:87::154",
+ subnet.get("address"),
+ "IPv6 address of static subnet",
+ )
+ self.assertEqual(
+ "64", subnet.get("netmask"), "NetMask of static6 subnet"
+ )
+
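+ # Every collected route must use one of the expected gateways with metric 10000.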
+ route_set = set(["10.20.87.253", "10.20.87.105", "192.168.0.10"])
+ for route in route_list:
+ self.assertEqual(10000, route.get("metric"), "metric of route")
+ gateway = route.get("gateway")
+ if gateway in route_set:
+ route_set.discard(gateway)
+ else:
+ self.assertEqual(True, False, "invalid gateway %s" % (gateway))
+
+ self.assertEqual("physical", nic2.get("type"), "type of NIC2")
+ self.assertEqual("NIC2", nic2.get("name"), "name of NIC2")
+ self.assertEqual(
+ "00:50:56:a6:ef:7d", nic2.get("mac_address"), "mac address of NIC2"
+ )
+
+ subnets = nic2.get("subnets")
+ self.assertEqual(1, len(subnets), "Number of subnets for NIC2")
+
+ subnet = subnets[0]
+ self.assertEqual("static", subnet.get("type"), "Subnet type")
+ self.assertEqual(
+ "192.168.6.102", subnet.get("address"), "Subnet address"
+ )
+ self.assertEqual(
+ "255.255.0.0", subnet.get("netmask"), "Subnet netmask"
+ )
+
+ def test_custom_script(self):
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+ conf = Config(cf)
+ self.assertIsNone(conf.custom_script_name)
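+ # _insertKey simulates the key being present in the customization config.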
+ cf._insertKey("CUSTOM-SCRIPT|SCRIPT-NAME", "test-script")
+ conf = Config(cf)
+ self.assertEqual("test-script", conf.custom_script_name)
+
+ def test_post_gc_status(self):
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+ conf = Config(cf)
+ self.assertFalse(conf.post_gc_status)
+ cf._insertKey("MISC|POST-GC-STATUS", "YES")
+ conf = Config(cf)
+ self.assertTrue(conf.post_gc_status)
+
+ def test_no_default_run_post_script(self):
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+ conf = Config(cf)
+ self.assertFalse(conf.default_run_post_script)
+ cf._insertKey("MISC|DEFAULT-RUN-POST-CUST-SCRIPT", "NO")
+ conf = Config(cf)
+ self.assertFalse(conf.default_run_post_script)
+
+ def test_yes_default_run_post_script(self):
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+ cf._insertKey("MISC|DEFAULT-RUN-POST-CUST-SCRIPT", "yes")
+ conf = Config(cf)
+ self.assertTrue(conf.default_run_post_script)
+
+
+class TestVmwareNetConfig(CiTestCase):
+ """Test conversion of vmware config to cloud-init config."""
+
+ maxDiff = None
+
+ def _get_NicConfigurator(self, text):
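+ # Helper: write the raw customization text to a temp file, parse it
+ # into a Config, and build a NicConfigurator from its NICs.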
+ fp = None
+ try:
+ with tempfile.NamedTemporaryFile(
+ mode="w", dir=self.tmp_dir(), delete=False
+ ) as fp:
+ fp.write(text)
+ fp.close()
+ cfg = Config(ConfigFile(fp.name))
+ return NicConfigurator(cfg.nics, use_system_devices=False)
+ finally:
+ if fp:
+ os.unlink(fp.name)
+
+ def test_non_primary_nic_without_gateway(self):
+ """A non primary nic set is not required to have a gateway."""
+ config = textwrap.dedent(
+ """\
+ [NETWORK]
+ NETWORKING = yes
+ BOOTPROTO = dhcp
+ HOSTNAME = myhost1
+ DOMAINNAME = eng.vmware.com
+
+ [NIC-CONFIG]
+ NICS = NIC1
+
+ [NIC1]
+ MACADDR = 00:50:56:a6:8c:08
+ ONBOOT = yes
+ IPv4_MODE = BACKWARDS_COMPATIBLE
+ BOOTPROTO = static
+ IPADDR = 10.20.87.154
+ NETMASK = 255.255.252.0
+ """
+ )
+ nc = self._get_NicConfigurator(config)
+ self.assertEqual(
+ [
+ {
+ "type": "physical",
+ "name": "NIC1",
+ "mac_address": "00:50:56:a6:8c:08",
+ "subnets": [
+ {
+ "control": "auto",
+ "type": "static",
+ "address": "10.20.87.154",
+ "netmask": "255.255.252.0",
+ }
+ ],
+ }
+ ],
+ nc.generate(),
+ )
+
+ def test_non_primary_nic_with_gateway(self):
+ """A non primary nic set can have a gateway."""
+ config = textwrap.dedent(
+ """\
+ [NETWORK]
+ NETWORKING = yes
+ BOOTPROTO = dhcp
+ HOSTNAME = myhost1
+ DOMAINNAME = eng.vmware.com
+
+ [NIC-CONFIG]
+ NICS = NIC1
+
+ [NIC1]
+ MACADDR = 00:50:56:a6:8c:08
+ ONBOOT = yes
+ IPv4_MODE = BACKWARDS_COMPATIBLE
+ BOOTPROTO = static
+ IPADDR = 10.20.87.154
+ NETMASK = 255.255.252.0
+ GATEWAY = 10.20.87.253
+ """
+ )
+ nc = self._get_NicConfigurator(config)
+ self.assertEqual(
+ [
+ {
+ "type": "physical",
+ "name": "NIC1",
+ "mac_address": "00:50:56:a6:8c:08",
+ "subnets": [
+ {
+ "control": "auto",
+ "type": "static",
+ "address": "10.20.87.154",
+ "netmask": "255.255.252.0",
+ "routes": [
+ {
+ "type": "route",
+ "destination": "10.20.84.0/22",
+ "gateway": "10.20.87.253",
+ "metric": 10000,
+ }
+ ],
+ }
+ ],
+ }
+ ],
+ nc.generate(),
+ )
+
+ def test_cust_non_primary_nic_with_gateway_(self):
+ """A customer non primary nic set can have a gateway."""
+ config = textwrap.dedent(
+ """\
+ [NETWORK]
+ NETWORKING = yes
+ BOOTPROTO = dhcp
+ HOSTNAME = static-debug-vm
+ DOMAINNAME = cluster.local
+
+ [NIC-CONFIG]
+ NICS = NIC1
+
+ [NIC1]
+ MACADDR = 00:50:56:ac:d1:8a
+ ONBOOT = yes
+ IPv4_MODE = BACKWARDS_COMPATIBLE
+ BOOTPROTO = static
+ IPADDR = 100.115.223.75
+ NETMASK = 255.255.255.0
+ GATEWAY = 100.115.223.254
+
+
+ [DNS]
+ DNSFROMDHCP=no
+
+ NAMESERVER|1 = 8.8.8.8
+
+ [DATETIME]
+ UTC = yes
+ """
+ )
+ nc = self._get_NicConfigurator(config)
+ self.assertEqual(
+ [
+ {
+ "type": "physical",
+ "name": "NIC1",
+ "mac_address": "00:50:56:ac:d1:8a",
+ "subnets": [
+ {
+ "control": "auto",
+ "type": "static",
+ "address": "100.115.223.75",
+ "netmask": "255.255.255.0",
+ "routes": [
+ {
+ "type": "route",
+ "destination": "100.115.223.0/24",
+ "gateway": "100.115.223.254",
+ "metric": 10000,
+ }
+ ],
+ }
+ ],
+ }
+ ],
+ nc.generate(),
+ )
+
+ def test_a_primary_nic_with_gateway(self):
+ """A primary nic set can have a gateway."""
+ config = textwrap.dedent(
+ """\
+ [NETWORK]
+ NETWORKING = yes
+ BOOTPROTO = dhcp
+ HOSTNAME = myhost1
+ DOMAINNAME = eng.vmware.com
+
+ [NIC-CONFIG]
+ NICS = NIC1
+
+ [NIC1]
+ MACADDR = 00:50:56:a6:8c:08
+ ONBOOT = yes
+ IPv4_MODE = BACKWARDS_COMPATIBLE
+ BOOTPROTO = static
+ IPADDR = 10.20.87.154
+ NETMASK = 255.255.252.0
+ PRIMARY = true
+ GATEWAY = 10.20.87.253
+ """
+ )
+ nc = self._get_NicConfigurator(config)
+ self.assertEqual(
+ [
+ {
+ "type": "physical",
+ "name": "NIC1",
+ "mac_address": "00:50:56:a6:8c:08",
+ "subnets": [
+ {
+ "control": "auto",
+ "type": "static",
+ "address": "10.20.87.154",
+ "netmask": "255.255.252.0",
+ "gateway": "10.20.87.253",
+ }
+ ],
+ }
+ ],
+ nc.generate(),
+ )
+
+ def test_meta_data(self):
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+ conf = Config(cf)
+ self.assertIsNone(conf.meta_data_name)
+ cf._insertKey("CLOUDINIT|METADATA", "test-metadata")
+ conf = Config(cf)
+ self.assertEqual("test-metadata", conf.meta_data_name)
+
+ def test_user_data(self):
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+ conf = Config(cf)
+ self.assertIsNone(conf.user_data_name)
+ cf._insertKey("CLOUDINIT|USERDATA", "test-userdata")
+ conf = Config(cf)
+ self.assertEqual("test-userdata", conf.user_data_name)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py
index 739bbebf..0ed8a120 100644
--- a/tests/unittests/test__init__.py
+++ b/tests/unittests/test__init__.py
@@ -5,14 +5,9 @@ import os
import shutil
import tempfile
+from cloudinit import handlers, helpers, settings, url_helper, util
from cloudinit.cmd import main
-from cloudinit import handlers
-from cloudinit import helpers
-from cloudinit import settings
-from cloudinit import url_helper
-from cloudinit import util
-
-from cloudinit.tests.helpers import TestCase, CiTestCase, ExitStack, mock
+from tests.unittests.helpers import CiTestCase, ExitStack, TestCase, mock
class FakeModule(handlers.Handler):
@@ -28,7 +23,6 @@ class FakeModule(handlers.Handler):
class TestWalkerHandleHandler(TestCase):
-
def setUp(self):
super(TestWalkerHandleHandler, self).setUp()
tmpdir = tempfile.mkdtemp()
@@ -39,13 +33,16 @@ class TestWalkerHandleHandler(TestCase):
"frequency": "",
"handlerdir": tmpdir,
"handlers": helpers.ContentHandlers(),
- "data": None}
+ "data": None,
+ }
self.expected_module_name = "part-handler-%03d" % (
- self.data["handlercount"],)
+ self.data["handlercount"],
+ )
expected_file_name = "%s.py" % self.expected_module_name
self.expected_file_fullname = os.path.join(
- self.data["handlerdir"], expected_file_name)
+ self.data["handlerdir"], expected_file_name
+ )
self.module_fake = FakeModule()
self.ctype = None
self.filename = None
@@ -56,45 +53,55 @@ class TestWalkerHandleHandler(TestCase):
resources = ExitStack()
self.addCleanup(resources.close)
self.write_file_mock = resources.enter_context(
- mock.patch('cloudinit.util.write_file'))
+ mock.patch("cloudinit.util.write_file")
+ )
def test_no_errors(self):
"""Payload gets written to file and added to C{pdata}."""
- with mock.patch('cloudinit.importer.import_module',
- return_value=self.module_fake) as mockobj:
- handlers.walker_handle_handler(self.data, self.ctype,
- self.filename, self.payload)
+ with mock.patch(
+ "cloudinit.importer.import_module", return_value=self.module_fake
+ ) as mockobj:
+ handlers.walker_handle_handler(
+ self.data, self.ctype, self.filename, self.payload
+ )
mockobj.assert_called_once_with(self.expected_module_name)
self.write_file_mock.assert_called_once_with(
- self.expected_file_fullname, self.payload, 0o600)
- self.assertEqual(self.data['handlercount'], 1)
+ self.expected_file_fullname, self.payload, 0o600
+ )
+ self.assertEqual(self.data["handlercount"], 1)
def test_import_error(self):
"""Module import errors are logged. No handler added to C{pdata}."""
- with mock.patch('cloudinit.importer.import_module',
- side_effect=ImportError) as mockobj:
- handlers.walker_handle_handler(self.data, self.ctype,
- self.filename, self.payload)
+ with mock.patch(
+ "cloudinit.importer.import_module", side_effect=ImportError
+ ) as mockobj:
+ handlers.walker_handle_handler(
+ self.data, self.ctype, self.filename, self.payload
+ )
mockobj.assert_called_once_with(self.expected_module_name)
self.write_file_mock.assert_called_once_with(
- self.expected_file_fullname, self.payload, 0o600)
- self.assertEqual(self.data['handlercount'], 0)
+ self.expected_file_fullname, self.payload, 0o600
+ )
+ self.assertEqual(self.data["handlercount"], 0)
def test_attribute_error(self):
"""Attribute errors are logged. No handler added to C{pdata}."""
- with mock.patch('cloudinit.importer.import_module',
- side_effect=AttributeError,
- return_value=self.module_fake) as mockobj:
- handlers.walker_handle_handler(self.data, self.ctype,
- self.filename, self.payload)
+ with mock.patch(
+ "cloudinit.importer.import_module",
+ side_effect=AttributeError,
+ return_value=self.module_fake,
+ ) as mockobj:
+ handlers.walker_handle_handler(
+ self.data, self.ctype, self.filename, self.payload
+ )
mockobj.assert_called_once_with(self.expected_module_name)
self.write_file_mock.assert_called_once_with(
- self.expected_file_fullname, self.payload, 0o600)
- self.assertEqual(self.data['handlercount'], 0)
+ self.expected_file_fullname, self.payload, 0o600
+ )
+ self.assertEqual(self.data["handlercount"], 0)
class TestHandlerHandlePart(TestCase):
-
def setUp(self):
super(TestHandlerHandlePart, self).setUp()
self.data = "fake data"
@@ -103,7 +110,7 @@ class TestHandlerHandlePart(TestCase):
self.payload = "fake payload"
self.frequency = settings.PER_INSTANCE
self.headers = {
- 'Content-Type': self.ctype,
+ "Content-Type": self.ctype,
}
def test_normal_version_1(self):
@@ -111,126 +118,172 @@ class TestHandlerHandlePart(TestCase):
C{handle_part} is called without C{frequency} for
C{handler_version} == 1.
"""
- mod_mock = mock.Mock(frequency=settings.PER_INSTANCE,
- handler_version=1)
- handlers.run_part(mod_mock, self.data, self.filename, self.payload,
- self.frequency, self.headers)
+ mod_mock = mock.Mock(
+ frequency=settings.PER_INSTANCE, handler_version=1
+ )
+ handlers.run_part(
+ mod_mock,
+ self.data,
+ self.filename,
+ self.payload,
+ self.frequency,
+ self.headers,
+ )
# Assert that the handle_part() method of the mock object got
# called with the expected arguments.
mod_mock.handle_part.assert_called_once_with(
- self.data, self.ctype, self.filename, self.payload)
+ self.data, self.ctype, self.filename, self.payload
+ )
def test_normal_version_2(self):
"""
C{handle_part} is called with C{frequency} for
C{handler_version} == 2.
"""
- mod_mock = mock.Mock(frequency=settings.PER_INSTANCE,
- handler_version=2)
- handlers.run_part(mod_mock, self.data, self.filename, self.payload,
- self.frequency, self.headers)
+ mod_mock = mock.Mock(
+ frequency=settings.PER_INSTANCE, handler_version=2
+ )
+ handlers.run_part(
+ mod_mock,
+ self.data,
+ self.filename,
+ self.payload,
+ self.frequency,
+ self.headers,
+ )
# Assert that the handle_part() method of the mock object got
# called with the expected arguments.
mod_mock.handle_part.assert_called_once_with(
- self.data, self.ctype, self.filename, self.payload,
- settings.PER_INSTANCE)
+ self.data,
+ self.ctype,
+ self.filename,
+ self.payload,
+ settings.PER_INSTANCE,
+ )
def test_modfreq_per_always(self):
"""
C{handle_part} is called regardless of frequency if nofreq is always.
"""
self.frequency = "once"
- mod_mock = mock.Mock(frequency=settings.PER_ALWAYS,
- handler_version=1)
- handlers.run_part(mod_mock, self.data, self.filename, self.payload,
- self.frequency, self.headers)
+ mod_mock = mock.Mock(frequency=settings.PER_ALWAYS, handler_version=1)
+ handlers.run_part(
+ mod_mock,
+ self.data,
+ self.filename,
+ self.payload,
+ self.frequency,
+ self.headers,
+ )
# Assert that the handle_part() method of the mock object got
# called with the expected arguments.
mod_mock.handle_part.assert_called_once_with(
- self.data, self.ctype, self.filename, self.payload)
+ self.data, self.ctype, self.filename, self.payload
+ )
def test_no_handle_when_modfreq_once(self):
"""C{handle_part} is not called if frequency is once."""
self.frequency = "once"
mod_mock = mock.Mock(frequency=settings.PER_ONCE)
- handlers.run_part(mod_mock, self.data, self.filename, self.payload,
- self.frequency, self.headers)
+ handlers.run_part(
+ mod_mock,
+ self.data,
+ self.filename,
+ self.payload,
+ self.frequency,
+ self.headers,
+ )
self.assertEqual(0, mod_mock.handle_part.call_count)
def test_exception_is_caught(self):
"""Exceptions within C{handle_part} are caught and logged."""
- mod_mock = mock.Mock(frequency=settings.PER_INSTANCE,
- handler_version=1)
+ mod_mock = mock.Mock(
+ frequency=settings.PER_INSTANCE, handler_version=1
+ )
mod_mock.handle_part.side_effect = Exception
try:
- handlers.run_part(mod_mock, self.data, self.filename,
- self.payload, self.frequency, self.headers)
+ handlers.run_part(
+ mod_mock,
+ self.data,
+ self.filename,
+ self.payload,
+ self.frequency,
+ self.headers,
+ )
except Exception:
self.fail("Exception was not caught in handle_part")
mod_mock.handle_part.assert_called_once_with(
- self.data, self.ctype, self.filename, self.payload)
+ self.data, self.ctype, self.filename, self.payload
+ )
class TestCmdlineUrl(CiTestCase):
def test_parse_cmdline_url_nokey_raises_keyerror(self):
self.assertRaises(
- KeyError, main.parse_cmdline_url, 'root=foo bar single')
+ KeyError, main.parse_cmdline_url, "root=foo bar single"
+ )
def test_parse_cmdline_url_found(self):
- cmdline = 'root=foo bar single url=http://example.com arg1 -v'
+ cmdline = "root=foo bar single url=http://example.com arg1 -v"
self.assertEqual(
- ('url', 'http://example.com'), main.parse_cmdline_url(cmdline))
+ ("url", "http://example.com"), main.parse_cmdline_url(cmdline)
+ )
- @mock.patch('cloudinit.cmd.main.url_helper.read_file_or_url')
+ @mock.patch("cloudinit.cmd.main.url_helper.read_file_or_url")
def test_invalid_content(self, m_read):
key = "cloud-config-url"
- url = 'http://example.com/foo'
+ url = "http://example.com/foo"
cmdline = "ro %s=%s bar=1" % (key, url)
m_read.return_value = url_helper.StringResponse(b"unexpected blob")
fpath = self.tmp_path("ccfile")
lvl, msg = main.attempt_cmdline_url(
- fpath, network=True, cmdline=cmdline)
+ fpath, network=True, cmdline=cmdline
+ )
self.assertEqual(logging.WARN, lvl)
self.assertIn(url, msg)
self.assertFalse(os.path.exists(fpath))
- @mock.patch('cloudinit.cmd.main.url_helper.read_file_or_url')
+ @mock.patch("cloudinit.cmd.main.url_helper.read_file_or_url")
def test_valid_content(self, m_read):
url = "http://example.com/foo"
payload = b"#cloud-config\nmydata: foo\nbar: wark\n"
- cmdline = "ro %s=%s bar=1" % ('cloud-config-url', url)
+ cmdline = "ro %s=%s bar=1" % ("cloud-config-url", url)
m_read.return_value = url_helper.StringResponse(payload)
fpath = self.tmp_path("ccfile")
lvl, msg = main.attempt_cmdline_url(
- fpath, network=True, cmdline=cmdline)
+ fpath, network=True, cmdline=cmdline
+ )
self.assertEqual(util.load_file(fpath, decode=False), payload)
self.assertEqual(logging.INFO, lvl)
self.assertIn(url, msg)
- @mock.patch('cloudinit.cmd.main.url_helper.read_file_or_url')
+ @mock.patch("cloudinit.cmd.main.url_helper.read_file_or_url")
def test_no_key_found(self, m_read):
cmdline = "ro mykey=http://example.com/foo root=foo"
fpath = self.tmp_path("ccpath")
lvl, _msg = main.attempt_cmdline_url(
- fpath, network=True, cmdline=cmdline)
+ fpath, network=True, cmdline=cmdline
+ )
m_read.assert_not_called()
self.assertFalse(os.path.exists(fpath))
self.assertEqual(logging.DEBUG, lvl)
- @mock.patch('cloudinit.cmd.main.url_helper.read_file_or_url')
+ @mock.patch("cloudinit.cmd.main.url_helper.read_file_or_url")
def test_exception_warns(self, m_read):
url = "http://example.com/foo"
cmdline = "ro cloud-config-url=%s root=LABEL=bar" % url
fpath = self.tmp_path("ccfile")
m_read.side_effect = url_helper.UrlError(
- cause="Unexpected Error", url="http://example.com/foo")
+ cause="Unexpected Error", url="http://example.com/foo"
+ )
lvl, msg = main.attempt_cmdline_url(
- fpath, network=True, cmdline=cmdline)
+ fpath, network=True, cmdline=cmdline
+ )
self.assertEqual(logging.WARN, lvl)
self.assertIn(url, msg)
self.assertFalse(os.path.exists(fpath))
diff --git a/tests/unittests/test_atomic_helper.py b/tests/unittests/test_atomic_helper.py
index 0101b0e3..684a9ae5 100644
--- a/tests/unittests/test_atomic_helper.py
+++ b/tests/unittests/test_atomic_helper.py
@@ -5,8 +5,7 @@ import os
import stat
from cloudinit import atomic_helper
-
-from cloudinit.tests.helpers import CiTestCase
+from tests.unittests.helpers import CiTestCase
class TestAtomicHelper(CiTestCase):
@@ -34,7 +33,7 @@ class TestAtomicHelper(CiTestCase):
def test_write_json(self):
"""write_json output is readable json."""
path = self.tmp_path("test_write_json")
- data = {'key1': 'value1', 'key2': ['i1', 'i2']}
+ data = {"key1": "value1", "key2": ["i1", "i2"]}
atomic_helper.write_json(path, data)
with open(path, "r") as fp:
found = json.load(fp)
@@ -55,4 +54,5 @@ class TestAtomicHelper(CiTestCase):
file_stat = os.stat(path)
self.assertEqual(perms, stat.S_IMODE(file_stat.st_mode))
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py
index c5675249..0dae924d 100644
--- a/tests/unittests/test_builtin_handlers.py
+++ b/tests/unittests/test_builtin_handlers.py
@@ -9,47 +9,60 @@ import shutil
import tempfile
from textwrap import dedent
+import pytest
-from cloudinit.tests.helpers import (
- FilesystemMockingTestCase, CiTestCase, mock, skipUnlessJinja)
-
-from cloudinit import handlers
-from cloudinit import helpers
-from cloudinit import subp
-from cloudinit import util
-
+from cloudinit import handlers, helpers, subp, util
+from cloudinit.cmd.devel import read_cfg_paths
from cloudinit.handlers.cloud_config import CloudConfigPartHandler
from cloudinit.handlers.jinja_template import (
- JinjaTemplatePartHandler, convert_jinja_instance_data,
- render_jinja_payload)
+ JinjaTemplatePartHandler,
+ convert_jinja_instance_data,
+ render_jinja_payload,
+)
from cloudinit.handlers.shell_script import ShellScriptPartHandler
+from cloudinit.handlers.shell_script_by_frequency import (
+ get_script_folder_by_frequency,
+ path_map,
+)
from cloudinit.handlers.upstart_job import UpstartJobPartHandler
+from cloudinit.settings import PER_ALWAYS, PER_INSTANCE, PER_ONCE
+from tests.unittests.helpers import (
+ CiTestCase,
+ FilesystemMockingTestCase,
+ mock,
+ skipUnlessJinja,
+)
-from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE)
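+# Instance-data file under run_dir that the jinja handlers read in these tests.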
+INSTANCE_DATA_FILE = "instance-data-sensitive.json"
class TestUpstartJobPartHandler(FilesystemMockingTestCase):
- mpath = 'cloudinit.handlers.upstart_job.'
+ mpath = "cloudinit.handlers.upstart_job."
def test_upstart_frequency_no_out(self):
c_root = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, c_root)
up_root = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, up_root)
- paths = helpers.Paths({
- 'cloud_dir': c_root,
- 'upstart_dir': up_root,
- })
+ paths = helpers.Paths(
+ {
+ "cloud_dir": c_root,
+ "upstart_dir": up_root,
+ }
+ )
h = UpstartJobPartHandler(paths)
# No files should be written out when
# the frequency is ! per-instance
- h.handle_part('', handlers.CONTENT_START,
- None, None, None)
- h.handle_part('blah', 'text/upstart-job',
- 'test.conf', 'blah', frequency=PER_ALWAYS)
- h.handle_part('', handlers.CONTENT_END,
- None, None, None)
+ h.handle_part("", handlers.CONTENT_START, None, None, None)
+ h.handle_part(
+ "blah",
+ "text/upstart-job",
+ "test.conf",
+ "blah",
+ frequency=PER_ALWAYS,
+ )
+ h.handle_part("", handlers.CONTENT_END, None, None, None)
self.assertEqual(0, len(os.listdir(up_root)))
def test_upstart_frequency_single(self):
@@ -59,47 +72,54 @@ class TestUpstartJobPartHandler(FilesystemMockingTestCase):
self.patchOS(new_root)
self.patchUtils(new_root)
- paths = helpers.Paths({
- 'upstart_dir': "/etc/upstart",
- })
+ paths = helpers.Paths(
+ {
+ "upstart_dir": "/etc/upstart",
+ }
+ )
util.ensure_dir("/run")
util.ensure_dir("/etc/upstart")
- with mock.patch(self.mpath + 'SUITABLE_UPSTART', return_value=True):
- with mock.patch.object(subp, 'subp') as m_subp:
+ with mock.patch(self.mpath + "SUITABLE_UPSTART", return_value=True):
+ with mock.patch.object(subp, "subp") as m_subp:
h = UpstartJobPartHandler(paths)
- h.handle_part('', handlers.CONTENT_START,
- None, None, None)
- h.handle_part('blah', 'text/upstart-job',
- 'test.conf', 'blah', frequency=PER_INSTANCE)
- h.handle_part('', handlers.CONTENT_END,
- None, None, None)
+ h.handle_part("", handlers.CONTENT_START, None, None, None)
+ h.handle_part(
+ "blah",
+ "text/upstart-job",
+ "test.conf",
+ "blah",
+ frequency=PER_INSTANCE,
+ )
+ h.handle_part("", handlers.CONTENT_END, None, None, None)
- self.assertEqual(len(os.listdir('/etc/upstart')), 1)
+ self.assertEqual(len(os.listdir("/etc/upstart")), 1)
m_subp.assert_called_once_with(
- ['initctl', 'reload-configuration'], capture=False)
+ ["initctl", "reload-configuration"], capture=False
+ )
class TestJinjaTemplatePartHandler(CiTestCase):
with_logs = True
- mpath = 'cloudinit.handlers.jinja_template.'
+ mpath = "cloudinit.handlers.jinja_template."
def setUp(self):
super(TestJinjaTemplatePartHandler, self).setUp()
self.tmp = self.tmp_dir()
- self.run_dir = os.path.join(self.tmp, 'run_dir')
+ self.run_dir = os.path.join(self.tmp, "run_dir")
util.ensure_dir(self.run_dir)
- self.paths = helpers.Paths({
- 'cloud_dir': self.tmp, 'run_dir': self.run_dir})
+ self.paths = helpers.Paths(
+ {"cloud_dir": self.tmp, "run_dir": self.run_dir}
+ )
def test_jinja_template_part_handler_defaults(self):
"""On init, paths are saved and subhandler types are empty."""
h = JinjaTemplatePartHandler(self.paths)
- self.assertEqual(['## template: jinja'], h.prefixes)
+ self.assertEqual(["## template: jinja"], h.prefixes)
self.assertEqual(3, h.handler_version)
self.assertEqual(self.paths, h.paths)
self.assertEqual({}, h.sub_handlers)
@@ -109,34 +129,47 @@ class TestJinjaTemplatePartHandler(CiTestCase):
script_handler = ShellScriptPartHandler(self.paths)
cloudconfig_handler = CloudConfigPartHandler(self.paths)
h = JinjaTemplatePartHandler(
- self.paths, sub_handlers=[script_handler, cloudconfig_handler])
+ self.paths, sub_handlers=[script_handler, cloudconfig_handler]
+ )
self.assertCountEqual(
- ['text/cloud-config', 'text/cloud-config-jsonp',
- 'text/x-shellscript'],
- h.sub_handlers)
+ [
+ "text/cloud-config",
+ "text/cloud-config-jsonp",
+ "text/x-shellscript",
+ ],
+ h.sub_handlers,
+ )
def test_jinja_template_part_handler_looks_up_subhandler_types(self):
"""When sub_handlers are passed, init lists types of subhandlers."""
script_handler = ShellScriptPartHandler(self.paths)
cloudconfig_handler = CloudConfigPartHandler(self.paths)
h = JinjaTemplatePartHandler(
- self.paths, sub_handlers=[script_handler, cloudconfig_handler])
+ self.paths, sub_handlers=[script_handler, cloudconfig_handler]
+ )
self.assertCountEqual(
- ['text/cloud-config', 'text/cloud-config-jsonp',
- 'text/x-shellscript'],
- h.sub_handlers)
+ [
+ "text/cloud-config",
+ "text/cloud-config-jsonp",
+ "text/x-shellscript",
+ ],
+ h.sub_handlers,
+ )
def test_jinja_template_handle_noop_on_content_signals(self):
"""Perform no part handling when content type is CONTENT_SIGNALS."""
script_handler = ShellScriptPartHandler(self.paths)
- h = JinjaTemplatePartHandler(
- self.paths, sub_handlers=[script_handler])
- with mock.patch.object(script_handler, 'handle_part') as m_handle_part:
+ h = JinjaTemplatePartHandler(self.paths, sub_handlers=[script_handler])
+ with mock.patch.object(script_handler, "handle_part") as m_handle_part:
h.handle_part(
- data='data', ctype=handlers.CONTENT_START, filename='part-1',
- payload='## template: jinja\n#!/bin/bash\necho himom',
- frequency='freq', headers='headers')
+ data="data",
+ ctype=handlers.CONTENT_START,
+ filename="part-1",
+ payload="## template: jinja\n#!/bin/bash\necho himom",
+ frequency="freq",
+ headers="headers",
+ )
m_handle_part.assert_not_called()
@skipUnlessJinja()
@@ -145,21 +178,24 @@ class TestJinjaTemplatePartHandler(CiTestCase):
script_handler = ShellScriptPartHandler(self.paths)
self.assertEqual(2, script_handler.handler_version)
- # Create required instance-data.json file
- instance_json = os.path.join(self.run_dir, 'instance-data.json')
- instance_data = {'topkey': 'echo himom'}
+ # Create required instance data json file
+ instance_json = os.path.join(self.run_dir, INSTANCE_DATA_FILE)
+ instance_data = {"topkey": "echo himom"}
util.write_file(instance_json, util.json_dumps(instance_data))
- h = JinjaTemplatePartHandler(
- self.paths, sub_handlers=[script_handler])
- with mock.patch.object(script_handler, 'handle_part') as m_part:
+ h = JinjaTemplatePartHandler(self.paths, sub_handlers=[script_handler])
+ with mock.patch.object(script_handler, "handle_part") as m_part:
# ctype with leading '!' not in handlers.CONTENT_SIGNALS
h.handle_part(
- data='data', ctype="!" + handlers.CONTENT_START,
- filename='part01',
- payload='## template: jinja \t \n#!/bin/bash\n{{ topkey }}',
- frequency='freq', headers='headers')
+ data="data",
+ ctype="!" + handlers.CONTENT_START,
+ filename="part01",
+ payload="## template: jinja \t \n#!/bin/bash\n{{ topkey }}",
+ frequency="freq",
+ headers="headers",
+ )
m_part.assert_called_once_with(
- 'data', '!__begin__', 'part01', '#!/bin/bash\necho himom', 'freq')
+ "data", "!__begin__", "part01", "#!/bin/bash\necho himom", "freq"
+ )
@skipUnlessJinja()
def test_jinja_template_handle_subhandler_v3_with_clean_payload(self):
@@ -168,126 +204,172 @@ class TestJinjaTemplatePartHandler(CiTestCase):
self.assertEqual(3, cloudcfg_handler.handler_version)
# Create required instance-data.json file
- instance_json = os.path.join(self.run_dir, 'instance-data.json')
- instance_data = {'topkey': {'sub': 'runcmd: [echo hi]'}}
+ instance_json = os.path.join(self.run_dir, INSTANCE_DATA_FILE)
+ instance_data = {"topkey": {"sub": "runcmd: [echo hi]"}}
util.write_file(instance_json, util.json_dumps(instance_data))
h = JinjaTemplatePartHandler(
- self.paths, sub_handlers=[cloudcfg_handler])
- with mock.patch.object(cloudcfg_handler, 'handle_part') as m_part:
+ self.paths, sub_handlers=[cloudcfg_handler]
+ )
+ with mock.patch.object(cloudcfg_handler, "handle_part") as m_part:
# ctype with leading '!' not in handlers.CONTENT_SIGNALS
h.handle_part(
- data='data', ctype="!" + handlers.CONTENT_END,
- filename='part01',
- payload='## template: jinja\n#cloud-config\n{{ topkey.sub }}',
- frequency='freq', headers='headers')
+ data="data",
+ ctype="!" + handlers.CONTENT_END,
+ filename="part01",
+ payload="## template: jinja\n#cloud-config\n{{ topkey.sub }}",
+ frequency="freq",
+ headers="headers",
+ )
m_part.assert_called_once_with(
- 'data', '!__end__', 'part01', '#cloud-config\nruncmd: [echo hi]',
- 'freq', 'headers')
+ "data",
+ "!__end__",
+ "part01",
+ "#cloud-config\nruncmd: [echo hi]",
+ "freq",
+ "headers",
+ )
def test_jinja_template_handle_errors_on_missing_instance_data_json(self):
"""If instance-data is absent, raise an error from handle_part."""
script_handler = ShellScriptPartHandler(self.paths)
- h = JinjaTemplatePartHandler(
- self.paths, sub_handlers=[script_handler])
+ h = JinjaTemplatePartHandler(self.paths, sub_handlers=[script_handler])
with self.assertRaises(RuntimeError) as context_manager:
h.handle_part(
- data='data', ctype="!" + handlers.CONTENT_START,
- filename='part01',
- payload='## template: jinja \n#!/bin/bash\necho himom',
- frequency='freq', headers='headers')
- script_file = os.path.join(script_handler.script_dir, 'part01')
+ data="data",
+ ctype="!" + handlers.CONTENT_START,
+ filename="part01",
+ payload="## template: jinja \n#!/bin/bash\necho himom",
+ frequency="freq",
+ headers="headers",
+ )
+ script_file = os.path.join(script_handler.script_dir, "part01")
self.assertEqual(
- 'Cannot render jinja template vars. Instance data not yet present'
- ' at {}/instance-data.json'.format(
- self.run_dir), str(context_manager.exception))
+ "Cannot render jinja template vars. Instance data not yet present"
+ " at {}/{}".format(self.run_dir, INSTANCE_DATA_FILE),
+ str(context_manager.exception),
+ )
self.assertFalse(
os.path.exists(script_file),
- 'Unexpected file created %s' % script_file)
+ "Unexpected file created %s" % script_file,
+ )
def test_jinja_template_handle_errors_on_unreadable_instance_data(self):
"""If instance-data is unreadable, raise an error from handle_part."""
script_handler = ShellScriptPartHandler(self.paths)
- instance_json = os.path.join(self.run_dir, 'instance-data.json')
+ instance_json = os.path.join(self.run_dir, INSTANCE_DATA_FILE)
util.write_file(instance_json, util.json_dumps({}))
- h = JinjaTemplatePartHandler(
- self.paths, sub_handlers=[script_handler])
- with mock.patch(self.mpath + 'load_file') as m_load:
+ h = JinjaTemplatePartHandler(self.paths, sub_handlers=[script_handler])
+ with mock.patch(self.mpath + "load_file") as m_load:
with self.assertRaises(RuntimeError) as context_manager:
- m_load.side_effect = OSError(errno.EACCES, 'Not allowed')
+ m_load.side_effect = OSError(errno.EACCES, "Not allowed")
h.handle_part(
- data='data', ctype="!" + handlers.CONTENT_START,
- filename='part01',
- payload='## template: jinja \n#!/bin/bash\necho himom',
- frequency='freq', headers='headers')
- script_file = os.path.join(script_handler.script_dir, 'part01')
+ data="data",
+ ctype="!" + handlers.CONTENT_START,
+ filename="part01",
+ payload="## template: jinja \n#!/bin/bash\necho himom",
+ frequency="freq",
+ headers="headers",
+ )
+ script_file = os.path.join(script_handler.script_dir, "part01")
self.assertEqual(
- 'Cannot render jinja template vars. No read permission on'
- " '{rdir}/instance-data.json'. Try sudo".format(rdir=self.run_dir),
- str(context_manager.exception))
+ "Cannot render jinja template vars. No read permission on "
+ "'{}/{}'. Try sudo".format(self.run_dir, INSTANCE_DATA_FILE),
+ str(context_manager.exception),
+ )
self.assertFalse(
os.path.exists(script_file),
- 'Unexpected file created %s' % script_file)
+ "Unexpected file created %s" % script_file,
+ )
@skipUnlessJinja()
def test_jinja_template_handle_renders_jinja_content(self):
- """When present, render jinja variables from instance-data.json."""
+ """When present, render jinja variables from instance data"""
script_handler = ShellScriptPartHandler(self.paths)
- instance_json = os.path.join(self.run_dir, 'instance-data.json')
- instance_data = {'topkey': {'subkey': 'echo himom'}}
+ instance_json = os.path.join(self.run_dir, INSTANCE_DATA_FILE)
+ instance_data = {"topkey": {"subkey": "echo himom"}}
util.write_file(instance_json, util.json_dumps(instance_data))
- h = JinjaTemplatePartHandler(
- self.paths, sub_handlers=[script_handler])
+ h = JinjaTemplatePartHandler(self.paths, sub_handlers=[script_handler])
h.handle_part(
- data='data', ctype="!" + handlers.CONTENT_START,
- filename='part01',
+ data="data",
+ ctype="!" + handlers.CONTENT_START,
+ filename="part01",
payload=(
- '## template: jinja \n'
- '#!/bin/bash\n'
- '{{ topkey.subkey|default("nosubkey") }}'),
- frequency='freq', headers='headers')
- script_file = os.path.join(script_handler.script_dir, 'part01')
+ "## template: jinja \n"
+ "#!/bin/bash\n"
+ '{{ topkey.subkey|default("nosubkey") }}'
+ ),
+ frequency="freq",
+ headers="headers",
+ )
+ script_file = os.path.join(script_handler.script_dir, "part01")
self.assertNotIn(
- 'Instance data not yet present at {}/instance-data.json'.format(
- self.run_dir),
- self.logs.getvalue())
+ "Instance data not yet present at {}/{}".format(
+ self.run_dir, INSTANCE_DATA_FILE
+ ),
+ self.logs.getvalue(),
+ )
self.assertEqual(
- '#!/bin/bash\necho himom', util.load_file(script_file))
+ "#!/bin/bash\necho himom", util.load_file(script_file)
+ )
@skipUnlessJinja()
def test_jinja_template_handle_renders_jinja_content_missing_keys(self):
"""When specified jinja variable is undefined, log a warning."""
script_handler = ShellScriptPartHandler(self.paths)
- instance_json = os.path.join(self.run_dir, 'instance-data.json')
- instance_data = {'topkey': {'subkey': 'echo himom'}}
+ instance_json = os.path.join(self.run_dir, INSTANCE_DATA_FILE)
+ instance_data = {"topkey": {"subkey": "echo himom"}}
util.write_file(instance_json, util.json_dumps(instance_data))
- h = JinjaTemplatePartHandler(
- self.paths, sub_handlers=[script_handler])
+ h = JinjaTemplatePartHandler(self.paths, sub_handlers=[script_handler])
h.handle_part(
- data='data', ctype="!" + handlers.CONTENT_START,
- filename='part01',
- payload='## template: jinja \n#!/bin/bash\n{{ goodtry }}',
- frequency='freq', headers='headers')
- script_file = os.path.join(script_handler.script_dir, 'part01')
+ data="data",
+ ctype="!" + handlers.CONTENT_START,
+ filename="part01",
+ payload="## template: jinja \n#!/bin/bash\n{{ goodtry }}",
+ frequency="freq",
+ headers="headers",
+ )
+ script_file = os.path.join(script_handler.script_dir, "part01")
self.assertTrue(
os.path.exists(script_file),
- 'Missing expected file %s' % script_file)
+ "Missing expected file %s" % script_file,
+ )
self.assertIn(
"WARNING: Could not render jinja template variables in file"
" 'part01': 'goodtry'\n",
- self.logs.getvalue())
-
-
-class TestConvertJinjaInstanceData(CiTestCase):
-
- def test_convert_instance_data_hyphens_to_underscores(self):
- """Replace hyphenated keys with underscores in instance-data."""
- data = {'hyphenated-key': 'hyphenated-val',
- 'underscore_delim_key': 'underscore_delimited_val'}
- expected_data = {'hyphenated_key': 'hyphenated-val',
- 'underscore_delim_key': 'underscore_delimited_val'}
- self.assertEqual(
- expected_data,
- convert_jinja_instance_data(data=data))
+ self.logs.getvalue(),
+ )
+
+
+class TestConvertJinjaInstanceData:
+ @pytest.mark.parametrize(
+ "include_key_aliases,data,expected",
+ (
+ (False, {"my-key": "my-val"}, {"my-key": "my-val"}),
+ (
+ True,
+ {"my-key": "my-val"},
+ {"my-key": "my-val", "my_key": "my-val"},
+ ),
+ (False, {"my.key": "my.val"}, {"my.key": "my.val"}),
+ (
+ True,
+ {"my.key": "my.val"},
+ {"my.key": "my.val", "my_key": "my.val"},
+ ),
+ (
+ True,
+ {"my/key": "my/val"},
+ {"my/key": "my/val", "my_key": "my/val"},
+ ),
+ ),
+ )
+ def test_convert_instance_data_operators_to_underscores(
+ self, include_key_aliases, data, expected
+ ):
+ """Replace Jinja operators keys with underscores in instance-data."""
+ assert expected == convert_jinja_instance_data(
+ data=data, include_key_aliases=include_key_aliases
+ )
def test_convert_instance_data_promotes_versioned_keys_to_top_level(self):
"""Any versioned keys are promoted as top-level keys
@@ -296,45 +378,49 @@ class TestConvertJinjaInstanceData(CiTestCase):
 allow ease of reference for users. Instead of v1.availability_zone,
the name availability_zone can be used in templates.
"""
- data = {'ds': {'dskey1': 1, 'dskey2': 2},
- 'v1': {'v1key1': 'v1.1'},
- 'v2': {'v2key1': 'v2.1'}}
+ data = {
+ "ds": {"dskey1": 1, "dskey2": 2},
+ "v1": {"v1key1": "v1.1"},
+ "v2": {"v2key1": "v2.1"},
+ }
expected_data = copy.deepcopy(data)
- expected_data.update({'v1key1': 'v1.1', 'v2key1': 'v2.1'})
+ expected_data.update({"v1key1": "v1.1", "v2key1": "v2.1"})
converted_data = convert_jinja_instance_data(data=data)
- self.assertCountEqual(
- ['ds', 'v1', 'v2', 'v1key1', 'v2key1'], converted_data.keys())
- self.assertEqual(
- expected_data,
- converted_data)
+ assert sorted(["ds", "v1", "v2", "v1key1", "v2key1"]) == sorted(
+ converted_data.keys()
+ )
+ assert expected_data == converted_data
def test_convert_instance_data_most_recent_version_of_promoted_keys(self):
"""The most-recent versioned key value is promoted to top-level."""
- data = {'v1': {'key1': 'old v1 key1', 'key2': 'old v1 key2'},
- 'v2': {'key1': 'newer v2 key1', 'key3': 'newer v2 key3'},
- 'v3': {'key1': 'newest v3 key1'}}
+ data = {
+ "v1": {"key1": "old v1 key1", "key2": "old v1 key2"},
+ "v2": {"key1": "newer v2 key1", "key3": "newer v2 key3"},
+ "v3": {"key1": "newest v3 key1"},
+ }
expected_data = copy.deepcopy(data)
expected_data.update(
- {'key1': 'newest v3 key1', 'key2': 'old v1 key2',
- 'key3': 'newer v2 key3'})
+ {
+ "key1": "newest v3 key1",
+ "key2": "old v1 key2",
+ "key3": "newer v2 key3",
+ }
+ )
converted_data = convert_jinja_instance_data(data=data)
- self.assertEqual(
- expected_data,
- converted_data)
+ assert expected_data == converted_data
def test_convert_instance_data_decodes_decode_paths(self):
"""Any decode_paths provided are decoded by convert_instance_data."""
- data = {'key1': {'subkey1': 'aGkgbW9t'}, 'key2': 'aGkgZGFk'}
+ data = {"key1": {"subkey1": "aGkgbW9t"}, "key2": "aGkgZGFk"}
expected_data = copy.deepcopy(data)
- expected_data['key1']['subkey1'] = 'hi mom'
+ expected_data["key1"]["subkey1"] = "hi mom"
converted_data = convert_jinja_instance_data(
- data=data, decode_paths=('key1/subkey1',))
- self.assertEqual(
- expected_data,
- converted_data)
+ data=data, decode_paths=("key1/subkey1",)
+ )
+ assert expected_data == converted_data
class TestRenderJinjaPayload(CiTestCase):
@@ -345,39 +431,69 @@ class TestRenderJinjaPayload(CiTestCase):
def test_render_jinja_payload_logs_jinja_vars_on_debug(self):
"""When debug is True, log jinja varables available."""
payload = (
- '## template: jinja\n#!/bin/sh\necho hi from {{ v1.hostname }}')
- instance_data = {'v1': {'hostname': 'foo'}, 'instance-id': 'iid'}
- expected_log = dedent("""\
+ "## template: jinja\n#!/bin/sh\necho hi from {{ v1.hostname }}"
+ )
+ instance_data = {"v1": {"hostname": "foo"}, "instance-id": "iid"}
+ expected_log = dedent(
+ """\
DEBUG: Converted jinja variables
{
"hostname": "foo",
+ "instance-id": "iid",
"instance_id": "iid",
"v1": {
"hostname": "foo"
}
}
- """)
+ """
+ )
self.assertEqual(
render_jinja_payload(
- payload=payload, payload_fn='myfile',
- instance_data=instance_data, debug=True),
- '#!/bin/sh\necho hi from foo')
+ payload=payload,
+ payload_fn="myfile",
+ instance_data=instance_data,
+ debug=True,
+ ),
+ "#!/bin/sh\necho hi from foo",
+ )
self.assertEqual(expected_log, self.logs.getvalue())
@skipUnlessJinja()
def test_render_jinja_payload_replaces_missing_variables_and_warns(self):
"""Warn on missing jinja variables and replace the absent variable."""
- payload = (
- '## template: jinja\n#!/bin/sh\necho hi from {{ NOTHERE }}')
- instance_data = {'v1': {'hostname': 'foo'}, 'instance-id': 'iid'}
+ payload = "## template: jinja\n#!/bin/sh\necho hi from {{ NOTHERE }}"
+ instance_data = {"v1": {"hostname": "foo"}, "instance-id": "iid"}
self.assertEqual(
render_jinja_payload(
- payload=payload, payload_fn='myfile',
- instance_data=instance_data),
- '#!/bin/sh\necho hi from CI_MISSING_JINJA_VAR/NOTHERE')
+ payload=payload,
+ payload_fn="myfile",
+ instance_data=instance_data,
+ ),
+ "#!/bin/sh\necho hi from CI_MISSING_JINJA_VAR/NOTHERE",
+ )
expected_log = (
- 'WARNING: Could not render jinja template variables in file'
- " 'myfile': 'NOTHERE'")
+ "WARNING: Could not render jinja template variables in file"
+ " 'myfile': 'NOTHERE'"
+ )
self.assertIn(expected_log, self.logs.getvalue())
+
+class TestShellScriptByFrequencyHandlers:
+ def do_test_frequency(self, frequency):
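+ # The folder returned for a frequency must match the path_map entry under the scripts dir.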
+ ci_paths = read_cfg_paths()
+ scripts_dir = ci_paths.get_cpath("scripts")
+ testFolder = os.path.join(scripts_dir, path_map[frequency])
+ folder = get_script_folder_by_frequency(frequency, scripts_dir)
+ assert testFolder == folder
+
+ def test_get_script_folder_per_boot(self):
+ self.do_test_frequency(PER_ALWAYS)
+
+ def test_get_script_folder_per_instance(self):
+ self.do_test_frequency(PER_INSTANCE)
+
+ def test_get_script_folder_per_once(self):
+ self.do_test_frequency(PER_ONCE)
+
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py
index 74f85959..bed73a93 100644
--- a/tests/unittests/test_cli.py
+++ b/tests/unittests/test_cli.py
@@ -1,13 +1,13 @@
# This file is part of cloud-init. See LICENSE file for license information.
-import os
+import contextlib
import io
+import os
from collections import namedtuple
from cloudinit.cmd import main as cli
-from cloudinit.tests import helpers as test_helpers
from cloudinit.util import load_file, load_json
-
+from tests.unittests import helpers as test_helpers
mock = test_helpers.mock
@@ -23,7 +23,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
def _call_main(self, sysv_args=None):
if not sysv_args:
- sysv_args = ['cloud-init']
+ sysv_args = ["cloud-init"]
try:
return cli.main(sysv_args=sysv_args)
except SystemExit as e:
@@ -35,36 +35,37 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
Valid name values are only init and modules.
"""
tmpd = self.tmp_dir()
- data_d = self.tmp_path('data', tmpd)
- link_d = self.tmp_path('link', tmpd)
- FakeArgs = namedtuple('FakeArgs', ['action', 'local', 'mode'])
+ data_d = self.tmp_path("data", tmpd)
+ link_d = self.tmp_path("link", tmpd)
+ FakeArgs = namedtuple("FakeArgs", ["action", "local", "mode"])
def myaction():
- raise Exception('Should not call myaction')
+ raise Exception("Should not call myaction")
- myargs = FakeArgs(('doesnotmatter', myaction), False, 'bogusmode')
+ myargs = FakeArgs(("doesnotmatter", myaction), False, "bogusmode")
with self.assertRaises(ValueError) as cm:
- cli.status_wrapper('init1', myargs, data_d, link_d)
- self.assertEqual('unknown name: init1', str(cm.exception))
- self.assertNotIn('Should not call myaction', self.logs.getvalue())
+ cli.status_wrapper("init1", myargs, data_d, link_d)
+ self.assertEqual("unknown name: init1", str(cm.exception))
+ self.assertNotIn("Should not call myaction", self.logs.getvalue())
def test_status_wrapper_errors_on_invalid_modes(self):
"""status_wrapper will error if a parameter combination is invalid."""
tmpd = self.tmp_dir()
- data_d = self.tmp_path('data', tmpd)
- link_d = self.tmp_path('link', tmpd)
- FakeArgs = namedtuple('FakeArgs', ['action', 'local', 'mode'])
+ data_d = self.tmp_path("data", tmpd)
+ link_d = self.tmp_path("link", tmpd)
+ FakeArgs = namedtuple("FakeArgs", ["action", "local", "mode"])
def myaction():
- raise Exception('Should not call myaction')
+ raise Exception("Should not call myaction")
- myargs = FakeArgs(('modules_name', myaction), False, 'bogusmode')
+ myargs = FakeArgs(("modules_name", myaction), False, "bogusmode")
with self.assertRaises(ValueError) as cm:
- cli.status_wrapper('modules', myargs, data_d, link_d)
+ cli.status_wrapper("modules", myargs, data_d, link_d)
self.assertEqual(
"Invalid cloud init mode specified 'modules-bogusmode'",
- str(cm.exception))
- self.assertNotIn('Should not call myaction', self.logs.getvalue())
+ str(cm.exception),
+ )
+ self.assertNotIn("Should not call myaction", self.logs.getvalue())
def test_status_wrapper_init_local_writes_fresh_status_info(self):
"""When running in init-local mode, status_wrapper writes status.json.
@@ -72,78 +73,90 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
Old status and results artifacts are also removed.
"""
tmpd = self.tmp_dir()
- data_d = self.tmp_path('data', tmpd)
- link_d = self.tmp_path('link', tmpd)
- status_link = self.tmp_path('status.json', link_d)
+ data_d = self.tmp_path("data", tmpd)
+ link_d = self.tmp_path("link", tmpd)
+ status_link = self.tmp_path("status.json", link_d)
# Write old artifacts which will be removed or updated.
for _dir in data_d, link_d:
test_helpers.populate_dir(
- _dir, {'status.json': 'old', 'result.json': 'old'})
+ _dir, {"status.json": "old", "result.json": "old"}
+ )
- FakeArgs = namedtuple('FakeArgs', ['action', 'local', 'mode'])
+ FakeArgs = namedtuple("FakeArgs", ["action", "local", "mode"])
def myaction(name, args):
# Return an error to watch status capture them
- return 'SomeDatasource', ['an error']
+ return "SomeDatasource", ["an error"]
- myargs = FakeArgs(('ignored_name', myaction), True, 'bogusmode')
- cli.status_wrapper('init', myargs, data_d, link_d)
+ myargs = FakeArgs(("ignored_name", myaction), True, "bogusmode")
+ cli.status_wrapper("init", myargs, data_d, link_d)
# No errors reported in status
- status_v1 = load_json(load_file(status_link))['v1']
- self.assertEqual(['an error'], status_v1['init-local']['errors'])
- self.assertEqual('SomeDatasource', status_v1['datasource'])
+ status_v1 = load_json(load_file(status_link))["v1"]
+ self.assertEqual(["an error"], status_v1["init-local"]["errors"])
+ self.assertEqual("SomeDatasource", status_v1["datasource"])
self.assertFalse(
- os.path.exists(self.tmp_path('result.json', data_d)),
- 'unexpected result.json found')
+ os.path.exists(self.tmp_path("result.json", data_d)),
+ "unexpected result.json found",
+ )
self.assertFalse(
- os.path.exists(self.tmp_path('result.json', link_d)),
- 'unexpected result.json link found')
+ os.path.exists(self.tmp_path("result.json", link_d)),
+ "unexpected result.json link found",
+ )
def test_no_arguments_shows_usage(self):
exit_code = self._call_main()
- self.assertIn('usage: cloud-init', self.stderr.getvalue())
+ self.assertIn("usage: cloud-init", self.stderr.getvalue())
self.assertEqual(2, exit_code)
def test_no_arguments_shows_error_message(self):
exit_code = self._call_main()
missing_subcommand_message = [
- 'too few arguments', # python2.7 msg
- 'the following arguments are required: subcommand' # python3 msg
+ "too few arguments", # python2.7 msg
+ "the following arguments are required: subcommand", # python3 msg
]
error = self.stderr.getvalue()
- matches = ([msg in error for msg in missing_subcommand_message])
+ matches = [msg in error for msg in missing_subcommand_message]
self.assertTrue(
- any(matches), 'Did not find error message for missing subcommand')
+ any(matches), "Did not find error message for missing subcommand"
+ )
self.assertEqual(2, exit_code)
def test_all_subcommands_represented_in_help(self):
"""All known subparsers are represented in the cloud-int help doc."""
self._call_main()
error = self.stderr.getvalue()
- expected_subcommands = ['analyze', 'clean', 'devel', 'dhclient-hook',
- 'features', 'init', 'modules', 'single']
+ expected_subcommands = [
+ "analyze",
+ "clean",
+ "devel",
+ "dhclient-hook",
+ "features",
+ "init",
+ "modules",
+ "single",
+ ]
for subcommand in expected_subcommands:
self.assertIn(subcommand, error)
- @mock.patch('cloudinit.cmd.main.status_wrapper')
+ @mock.patch("cloudinit.cmd.main.status_wrapper")
def test_init_subcommand_parser(self, m_status_wrapper):
"""The subcommand 'init' calls status_wrapper passing init."""
- self._call_main(['cloud-init', 'init'])
+ self._call_main(["cloud-init", "init"])
(name, parseargs) = m_status_wrapper.call_args_list[0][0]
- self.assertEqual('init', name)
- self.assertEqual('init', parseargs.subcommand)
- self.assertEqual('init', parseargs.action[0])
- self.assertEqual('main_init', parseargs.action[1].__name__)
+ self.assertEqual("init", name)
+ self.assertEqual("init", parseargs.subcommand)
+ self.assertEqual("init", parseargs.action[0])
+ self.assertEqual("main_init", parseargs.action[1].__name__)
- @mock.patch('cloudinit.cmd.main.status_wrapper')
+ @mock.patch("cloudinit.cmd.main.status_wrapper")
def test_modules_subcommand_parser(self, m_status_wrapper):
"""The subcommand 'modules' calls status_wrapper passing modules."""
- self._call_main(['cloud-init', 'modules'])
+ self._call_main(["cloud-init", "modules"])
(name, parseargs) = m_status_wrapper.call_args_list[0][0]
- self.assertEqual('modules', name)
- self.assertEqual('modules', parseargs.subcommand)
- self.assertEqual('modules', parseargs.action[0])
- self.assertEqual('main_modules', parseargs.action[1].__name__)
+ self.assertEqual("modules", name)
+ self.assertEqual("modules", parseargs.subcommand)
+ self.assertEqual("modules", parseargs.action[0])
+ self.assertEqual("main_modules", parseargs.action[1].__name__)
def test_conditional_subcommands_from_entry_point_sys_argv(self):
"""Subcommands from entry-point are properly parsed from sys.argv."""
@@ -151,14 +164,22 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
self.patchStdoutAndStderr(stdout=stdout)
expected_errors = [
- 'usage: cloud-init analyze', 'usage: cloud-init clean',
- 'usage: cloud-init collect-logs', 'usage: cloud-init devel',
- 'usage: cloud-init status']
+ "usage: cloud-init analyze",
+ "usage: cloud-init clean",
+ "usage: cloud-init collect-logs",
+ "usage: cloud-init devel",
+ "usage: cloud-init status",
+ ]
conditional_subcommands = [
- 'analyze', 'clean', 'collect-logs', 'devel', 'status']
+ "analyze",
+ "clean",
+ "collect-logs",
+ "devel",
+ "status",
+ ]
# The cloud-init entrypoint calls main without passing sys_argv
for subcommand in conditional_subcommands:
- with mock.patch('sys.argv', ['cloud-init', subcommand, '-h']):
+ with mock.patch("sys.argv", ["cloud-init", subcommand, "-h"]):
try:
cli.main()
except SystemExit as e:
@@ -168,9 +189,9 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
def test_analyze_subcommand_parser(self):
"""The subcommand cloud-init analyze calls the correct subparser."""
- self._call_main(['cloud-init', 'analyze'])
+ self._call_main(["cloud-init", "analyze"])
# These subcommands only valid for cloud-init analyze script
- expected_subcommands = ['blame', 'show', 'dump']
+ expected_subcommands = ["blame", "show", "dump"]
error = self.stderr.getvalue()
for subcommand in expected_subcommands:
self.assertIn(subcommand, error)
@@ -180,94 +201,177 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
# Provide -h param to collect-logs to avoid having to mock behavior.
stdout = io.StringIO()
self.patchStdoutAndStderr(stdout=stdout)
- self._call_main(['cloud-init', 'collect-logs', '-h'])
- self.assertIn('usage: cloud-init collect-log', stdout.getvalue())
+ self._call_main(["cloud-init", "collect-logs", "-h"])
+ self.assertIn("usage: cloud-init collect-log", stdout.getvalue())
def test_clean_subcommand_parser(self):
"""The subcommand cloud-init clean calls the subparser."""
# Provide -h param to clean to avoid having to mock behavior.
stdout = io.StringIO()
self.patchStdoutAndStderr(stdout=stdout)
- self._call_main(['cloud-init', 'clean', '-h'])
- self.assertIn('usage: cloud-init clean', stdout.getvalue())
+ self._call_main(["cloud-init", "clean", "-h"])
+ self.assertIn("usage: cloud-init clean", stdout.getvalue())
def test_status_subcommand_parser(self):
"""The subcommand cloud-init status calls the subparser."""
# Provide -h param to clean to avoid having to mock behavior.
stdout = io.StringIO()
self.patchStdoutAndStderr(stdout=stdout)
- self._call_main(['cloud-init', 'status', '-h'])
- self.assertIn('usage: cloud-init status', stdout.getvalue())
+ self._call_main(["cloud-init", "status", "-h"])
+ self.assertIn("usage: cloud-init status", stdout.getvalue())
def test_devel_subcommand_parser(self):
"""The subcommand cloud-init devel calls the correct subparser."""
- self._call_main(['cloud-init', 'devel'])
+ self._call_main(["cloud-init", "devel"])
# These subcommands only valid for cloud-init schema script
- expected_subcommands = ['schema']
+ expected_subcommands = ["schema"]
error = self.stderr.getvalue()
for subcommand in expected_subcommands:
self.assertIn(subcommand, error)
def test_wb_devel_schema_subcommand_parser(self):
"""The subcommand cloud-init schema calls the correct subparser."""
- exit_code = self._call_main(['cloud-init', 'devel', 'schema'])
+ exit_code = self._call_main(["cloud-init", "devel", "schema"])
self.assertEqual(1, exit_code)
# Known whitebox output from schema subcommand
self.assertEqual(
- 'Expected one of --config-file, --system or --docs arguments\n',
- self.stderr.getvalue())
+ "Error:\n"
+ "Expected one of --config-file, --system or --docs arguments\n",
+ self.stderr.getvalue(),
+ )
+
+ def test_wb_devel_schema_subcommand_doc_all_spot_check(self):
+ """Validate that doc content has correct values from known examples.
+
+ Ensure that schema docs are returned for the 'all' arg
+ """
- def test_wb_devel_schema_subcommand_doc_content(self):
- """Validate that doc content is sane from known examples."""
+ # Note: patchStdoutAndStderr() is convenient for reducing boilerplate,
+ # but it makes inspecting output while debugging less convenient;
+ # contextlib.redirect_stdout() provides similar behavior as a context
+ # manager.
stdout = io.StringIO()
- self.patchStdoutAndStderr(stdout=stdout)
- self._call_main(['cloud-init', 'devel', 'schema', '--docs', 'all'])
- expected_doc_sections = [
- '**Supported distros:** all',
- '**Supported distros:** alpine, centos, debian, fedora',
- '**Config schema**:\n **resize_rootfs:** (true/false/noblock)',
- '**Examples**::\n\n runcmd:\n - [ ls, -l, / ]\n'
- ]
+ with contextlib.redirect_stdout(stdout):
+ self._call_main(["cloud-init", "devel", "schema", "--docs", "all"])
+ expected_doc_sections = [
+ "**Supported distros:** all",
+ "**Supported distros:** almalinux, alpine, centos, "
+ "cloudlinux, debian, eurolinux, fedora, miraclelinux, "
+ "openEuler, opensuse, photon, rhel, rocky, sles, ubuntu, "
+ "virtuozzo",
+ "**Config schema**:\n **resize_rootfs:** "
+ "(true/false/noblock)",
+ "**Examples**::\n\n runcmd:\n - [ ls, -l, / ]\n",
+ ]
stdout = stdout.getvalue()
for expected in expected_doc_sections:
self.assertIn(expected, stdout)
- @mock.patch('cloudinit.cmd.main.main_single')
+ def test_wb_devel_schema_subcommand_single_spot_check(self):
+ """Validate that doc content has correct values from known example.
+
+ Validate a single module arg
+ """
+
+ # Note: patchStdoutAndStderr() is convenient for reducing boilerplate,
+ # but it makes inspecting output while debugging less convenient;
+ # contextlib.redirect_stdout() provides similar behavior as a context
+ # manager.
+ stdout = io.StringIO()
+ with contextlib.redirect_stdout(stdout):
+ self._call_main(
+ ["cloud-init", "devel", "schema", "--docs", "cc_runcmd"]
+ )
+ expected_doc_sections = [
+ "Runcmd\n------\n**Summary:** Run arbitrary commands"
+ ]
+ stdout = stdout.getvalue()
+ for expected in expected_doc_sections:
+ self.assertIn(expected, stdout)
+
+ def test_wb_devel_schema_subcommand_multiple_spot_check(self):
+ """Validate that doc content has correct values from known example.
+
+ Validate multiple module args
+ """
+
+ stdout = io.StringIO()
+ with contextlib.redirect_stdout(stdout):
+ self._call_main(
+ [
+ "cloud-init",
+ "devel",
+ "schema",
+ "--docs",
+ "cc_runcmd",
+ "cc_resizefs",
+ ]
+ )
+ expected_doc_sections = [
+ "Runcmd\n------\n**Summary:** Run arbitrary commands",
+ "Resizefs\n--------\n**Summary:** Resize filesystem",
+ ]
+ stdout = stdout.getvalue()
+ for expected in expected_doc_sections:
+ self.assertIn(expected, stdout)
+
+ def test_wb_devel_schema_subcommand_bad_arg_fails(self):
+ """Validate that doc content has correct values from known example.
+
+ Validate multiple args
+ """
+
+ # Note: patchStdoutAndStderr() is convenient for reducing boilerplate,
+ # but it makes the captured output harder to inspect when debugging.
+ # contextlib.redirect_stderr() provides similar behavior as a context
+ # manager.
+ stderr = io.StringIO()
+ with contextlib.redirect_stderr(stderr):
+ self._call_main(
+ ["cloud-init", "devel", "schema", "--docs", "garbage_value"]
+ )
+ expected_doc_sections = ["Invalid --docs value"]
+ stderr = stderr.getvalue()
+ for expected in expected_doc_sections:
+ self.assertIn(expected, stderr)
+
+ @mock.patch("cloudinit.cmd.main.main_single")
def test_single_subcommand(self, m_main_single):
"""The subcommand 'single' calls main_single with valid args."""
- self._call_main(['cloud-init', 'single', '--name', 'cc_ntp'])
+ self._call_main(["cloud-init", "single", "--name", "cc_ntp"])
(name, parseargs) = m_main_single.call_args_list[0][0]
- self.assertEqual('single', name)
- self.assertEqual('single', parseargs.subcommand)
- self.assertEqual('single', parseargs.action[0])
+ self.assertEqual("single", name)
+ self.assertEqual("single", parseargs.subcommand)
+ self.assertEqual("single", parseargs.action[0])
self.assertFalse(parseargs.debug)
self.assertFalse(parseargs.force)
self.assertIsNone(parseargs.frequency)
- self.assertEqual('cc_ntp', parseargs.name)
+ self.assertEqual("cc_ntp", parseargs.name)
self.assertFalse(parseargs.report)
- @mock.patch('cloudinit.cmd.main.dhclient_hook.handle_args')
+ @mock.patch("cloudinit.cmd.main.dhclient_hook.handle_args")
def test_dhclient_hook_subcommand(self, m_handle_args):
"""The subcommand 'dhclient-hook' calls dhclient_hook with args."""
- self._call_main(['cloud-init', 'dhclient-hook', 'up', 'eth0'])
+ self._call_main(["cloud-init", "dhclient-hook", "up", "eth0"])
(name, parseargs) = m_handle_args.call_args_list[0][0]
- self.assertEqual('dhclient-hook', name)
- self.assertEqual('dhclient-hook', parseargs.subcommand)
- self.assertEqual('dhclient-hook', parseargs.action[0])
+ self.assertEqual("dhclient-hook", name)
+ self.assertEqual("dhclient-hook", parseargs.subcommand)
+ self.assertEqual("dhclient-hook", parseargs.action[0])
self.assertFalse(parseargs.debug)
self.assertFalse(parseargs.force)
- self.assertEqual('up', parseargs.event)
- self.assertEqual('eth0', parseargs.interface)
+ self.assertEqual("up", parseargs.event)
+ self.assertEqual("eth0", parseargs.interface)
- @mock.patch('cloudinit.cmd.main.main_features')
+ @mock.patch("cloudinit.cmd.main.main_features")
def test_features_hook_subcommand(self, m_features):
"""The subcommand 'features' calls main_features with args."""
- self._call_main(['cloud-init', 'features'])
+ self._call_main(["cloud-init", "features"])
(name, parseargs) = m_features.call_args_list[0][0]
- self.assertEqual('features', name)
- self.assertEqual('features', parseargs.subcommand)
- self.assertEqual('features', parseargs.action[0])
+ self.assertEqual("features", name)
+ self.assertEqual("features", parseargs.subcommand)
+ self.assertEqual("features", parseargs.action[0])
self.assertFalse(parseargs.debug)
self.assertFalse(parseargs.force)
+
# vi: ts=4 expandtab
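
The spot-check tests above capture CLI output with contextlib.redirect_stdout()/redirect_stderr() rather than patching sys.stdout directly. A minimal standalone sketch of that pattern (assuming only a hypothetical main(argv) callable, not the real cloud-init entry point) could look like:

    import contextlib
    import io


    def capture_cli_output(main, argv):
        """Run a CLI entry point and return (exit_code, stdout, stderr).

        `main` is any callable taking an argv list; it stands in for the
        real entry point and is an assumption of this sketch.
        """
        stdout, stderr = io.StringIO(), io.StringIO()
        with contextlib.redirect_stdout(stdout):
            with contextlib.redirect_stderr(stderr):
                exit_code = main(argv)
        return exit_code, stdout.getvalue(), stderr.getvalue()

For example, code, out, err = capture_cli_output(lambda argv: 0, ["schema", "--docs", "all"]) would return the captured streams for later assertions.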
diff --git a/tests/unittests/test_conftest.py b/tests/unittests/test_conftest.py
new file mode 100644
index 00000000..68903430
--- /dev/null
+++ b/tests/unittests/test_conftest.py
@@ -0,0 +1,65 @@
+import pytest
+
+from cloudinit import subp
+from tests.unittests.helpers import CiTestCase
+
+
+class TestDisableSubpUsage:
+ """Test that the disable_subp_usage fixture behaves as expected."""
+
+ def test_using_subp_raises_assertion_error(self):
+ with pytest.raises(AssertionError):
+ subp.subp(["some", "args"])
+
+ def test_typeerrors_on_incorrect_usage(self):
+ with pytest.raises(TypeError):
+ # We are intentionally passing no value for a parameter, so:
+ # pylint: disable=no-value-for-parameter
+ subp.subp()
+
+ @pytest.mark.allow_all_subp
+ def test_subp_usage_can_be_reenabled(self):
+ subp.subp(["whoami"])
+
+ @pytest.mark.allow_subp_for("whoami")
+ def test_subp_usage_can_be_conditionally_reenabled(self):
+ # First confirm that a disallowed command still raises, then that the
+ # allowed command succeeds
+ with pytest.raises(AssertionError) as excinfo:
+ subp.subp(["some", "args"])
+ assert "allowed: whoami" in str(excinfo.value)
+ subp.subp(["whoami"])
+
+ @pytest.mark.allow_subp_for("whoami", "bash")
+ def test_subp_usage_can_be_conditionally_reenabled_for_multiple_cmds(self):
+ with pytest.raises(AssertionError) as excinfo:
+ subp.subp(["some", "args"])
+ assert "allowed: whoami,bash" in str(excinfo.value)
+ subp.subp(["bash", "-c", "true"])
+ subp.subp(["whoami"])
+
+ @pytest.mark.allow_all_subp
+ @pytest.mark.allow_subp_for("bash")
+ def test_both_marks_raise_an_error(self):
+ with pytest.raises(AssertionError, match="marked both"):
+ subp.subp(["bash"])
+
+
+class TestDisableSubpUsageInTestSubclass(CiTestCase):
+ """Test that disable_subp_usage doesn't impact CiTestCase's subp logic."""
+
+ def test_using_subp_raises_exception(self):
+ with pytest.raises(Exception):
+ subp.subp(["some", "args"])
+
+ def test_typeerrors_on_incorrect_usage(self):
+ with pytest.raises(TypeError):
+ subp.subp()
+
+ def test_subp_usage_can_be_reenabled(self):
+ _old_allowed_subp = self.allowed_subp
+ self.allowed_subp = True
+ try:
+ subp.subp(["bash", "-c", "true"])
+ finally:
+ self.allowed_subp = _old_allowed_subp
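
The disable_subp_usage fixture exercised above lives in conftest.py and is not part of this diff. A simplified sketch of how such an autouse fixture could honor the allow_all_subp and allow_subp_for marks (illustrative only; the real fixture may differ) is:

    from unittest import mock

    import pytest

    from cloudinit import subp


    @pytest.fixture(autouse=True)
    def disable_subp_usage(request):
        allow_all = request.node.get_closest_marker("allow_all_subp")
        allow_for = request.node.get_closest_marker("allow_subp_for")

        if allow_all and not allow_for:
            # Marked allow_all_subp: leave subp untouched.
            yield
            return

        allowed = list(allow_for.args) if allow_for else []
        real_subp = subp.subp

        def fake_subp(args, *posargs, **kwargs):
            if allow_all and allow_for:
                raise AssertionError(
                    "Test marked both allow_all_subp and allow_subp_for"
                )
            if args[0] not in allowed:
                raise AssertionError(
                    "Unexpectedly used subp, allowed: %s" % ",".join(allowed)
                )
            return real_subp(args, *posargs, **kwargs)

        # autospec keeps the real signature, so calling subp.subp() with no
        # arguments still raises TypeError as the tests above expect.
        with mock.patch("cloudinit.subp.subp", autospec=True,
                        side_effect=fake_subp):
            yield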
diff --git a/tests/unittests/test_cs_util.py b/tests/unittests/test_cs_util.py
index bfd07ecf..109e0208 100644
--- a/tests/unittests/test_cs_util.py
+++ b/tests/unittests/test_cs_util.py
@@ -1,9 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.tests import helpers as test_helpers
-
from cloudinit.cs_utils import Cepko
-
+from tests.unittests import helpers as test_helpers
SERVER_CONTEXT = {
"cpu": 1000,
@@ -16,7 +14,7 @@ SERVER_CONTEXT = {
"smp": 1,
"tags": ["much server", "very performance"],
"uuid": "65b2fb23-8c03-4187-a3ba-8b7c919e889",
- "vnc_password": "9e84d6cb49e46379"
+ "vnc_password": "9e84d6cb49e46379",
}
@@ -25,7 +23,7 @@ class CepkoMock(Cepko):
return SERVER_CONTEXT
def get(self, key="", request_pattern=None):
- return SERVER_CONTEXT['tags']
+ return SERVER_CONTEXT["tags"]
# 2015-01-22 BAW: This test is completely useless because it only ever tests
@@ -34,33 +32,36 @@ class CepkoMock(Cepko):
class CepkoResultTests(test_helpers.TestCase):
def setUp(self):
self.c = Cepko()
- raise test_helpers.SkipTest('This test is completely useless')
+ raise test_helpers.SkipTest("This test is completely useless")
def test_getitem(self):
result = self.c.all()
- self.assertEqual("65b2fb23-8c03-4187-a3ba-8b7c919e889", result['uuid'])
- self.assertEqual([], result['requirements'])
- self.assertEqual("much server", result['tags'][0])
- self.assertEqual(1, result['smp'])
+ self.assertEqual("65b2fb23-8c03-4187-a3ba-8b7c919e889", result["uuid"])
+ self.assertEqual([], result["requirements"])
+ self.assertEqual("much server", result["tags"][0])
+ self.assertEqual(1, result["smp"])
def test_len(self):
self.assertEqual(len(SERVER_CONTEXT), len(self.c.all()))
def test_contains(self):
result = self.c.all()
- self.assertTrue('uuid' in result)
- self.assertFalse('uid' in result)
- self.assertTrue('meta' in result)
- self.assertFalse('ssh_public_key' in result)
+ self.assertTrue("uuid" in result)
+ self.assertFalse("uid" in result)
+ self.assertTrue("meta" in result)
+ self.assertFalse("ssh_public_key" in result)
def test_iter(self):
- self.assertEqual(sorted(SERVER_CONTEXT.keys()),
- sorted([key for key in self.c.all()]))
+ self.assertEqual(
+ sorted(SERVER_CONTEXT.keys()),
+ sorted([key for key in self.c.all()]),
+ )
def test_with_list_as_result(self):
- result = self.c.get('tags')
- self.assertEqual('much server', result[0])
- self.assertTrue('very performance' in result)
+ result = self.c.get("tags")
+ self.assertEqual("much server", result[0])
+ self.assertTrue("very performance" in result)
self.assertEqual(2, len(result))
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py
index fb2b55e8..a5018a42 100644
--- a/tests/unittests/test_data.py
+++ b/tests/unittests/test_data.py
@@ -5,39 +5,33 @@
import gzip
import logging
import os
-from io import BytesIO, StringIO
-from unittest import mock
-
from email import encoders
from email.mime.application import MIMEApplication
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
+from io import BytesIO, StringIO
+from unittest import mock
import httpretty
from cloudinit import handlers
from cloudinit import helpers as c_helpers
-from cloudinit import log
-from cloudinit.settings import (PER_INSTANCE)
-from cloudinit import sources
-from cloudinit import stages
+from cloudinit import log, safeyaml, sources, stages
from cloudinit import user_data as ud
-from cloudinit import safeyaml
from cloudinit import util
-
-from cloudinit.tests import helpers
-
+from cloudinit.settings import PER_INSTANCE
+from tests.unittests import helpers
INSTANCE_ID = "i-testing"
class FakeDataSource(sources.DataSource):
-
- def __init__(self, userdata=None, vendordata=None):
+ def __init__(self, userdata=None, vendordata=None, vendordata2=None):
sources.DataSource.__init__(self, {}, None, None)
- self.metadata = {'instance-id': INSTANCE_ID}
+ self.metadata = {"instance-id": INSTANCE_ID}
self.userdata_raw = userdata
self.vendordata_raw = vendordata
+ self.vendordata2_raw = vendordata2
def count_messages(root):
@@ -51,7 +45,7 @@ def count_messages(root):
def gzip_text(text):
contents = BytesIO()
- f = gzip.GzipFile(fileobj=contents, mode='wb')
+ f = gzip.GzipFile(fileobj=contents, mode="wb")
f.write(util.encode_text(text))
f.flush()
f.close()
@@ -61,7 +55,6 @@ def gzip_text(text):
# FIXME: these tests shouldn't be checking log output??
# Weirddddd...
class TestConsumeUserData(helpers.FilesystemMockingTestCase):
-
def setUp(self):
super(TestConsumeUserData, self).setUp()
self._log = None
@@ -86,13 +79,13 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase):
return log_file
def test_simple_jsonp(self):
- blob = '''
+ blob = """
#cloud-config-jsonp
[
{ "op": "add", "path": "/baz", "value": "qux" },
{ "op": "add", "path": "/bar", "value": "qux2" }
]
-'''
+"""
ci = stages.Init()
ci.datasource = FakeDataSource(blob)
@@ -102,64 +95,84 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase):
cc_contents = util.load_file(ci.paths.get_ipath("cloud_config"))
cc = util.load_yaml(cc_contents)
self.assertEqual(2, len(cc))
- self.assertEqual('qux', cc['baz'])
- self.assertEqual('qux2', cc['bar'])
+ self.assertEqual("qux", cc["baz"])
+ self.assertEqual("qux2", cc["bar"])
- def test_simple_jsonp_vendor_and_user(self):
+ def test_simple_jsonp_vendor_and_vendor2_and_user(self):
# test that user-data wins over vendor
- user_blob = '''
+ user_blob = """
#cloud-config-jsonp
[
{ "op": "add", "path": "/baz", "value": "qux" },
- { "op": "add", "path": "/bar", "value": "qux2" }
+ { "op": "add", "path": "/bar", "value": "qux2" },
+ { "op": "add", "path": "/foobar", "value": "qux3" }
]
-'''
- vendor_blob = '''
+"""
+ vendor_blob = """
#cloud-config-jsonp
[
{ "op": "add", "path": "/baz", "value": "quxA" },
{ "op": "add", "path": "/bar", "value": "quxB" },
- { "op": "add", "path": "/foo", "value": "quxC" }
+ { "op": "add", "path": "/foo", "value": "quxC" },
+ { "op": "add", "path": "/corge", "value": "quxEE" }
+]
+"""
+ vendor2_blob = """
+#cloud-config-jsonp
+[
+ { "op": "add", "path": "/corge", "value": "quxD" },
+ { "op": "add", "path": "/grault", "value": "quxFF" },
+ { "op": "add", "path": "/foobar", "value": "quxGG" }
]
-'''
+"""
self.reRoot()
initer = stages.Init()
- initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
+ initer.datasource = FakeDataSource(
+ user_blob, vendordata=vendor_blob, vendordata2=vendor2_blob
+ )
initer.read_cfg()
initer.initialize()
initer.fetch()
initer.instancify()
initer.update()
- initer.cloudify().run('consume_data',
- initer.consume_data,
- args=[PER_INSTANCE],
- freq=PER_INSTANCE)
+ initer.cloudify().run(
+ "consume_data",
+ initer.consume_data,
+ args=[PER_INSTANCE],
+ freq=PER_INSTANCE,
+ )
mods = stages.Modules(initer)
- (_which_ran, _failures) = mods.run_section('cloud_init_modules')
+ (_which_ran, _failures) = mods.run_section("cloud_init_modules")
cfg = mods.cfg
- self.assertIn('vendor_data', cfg)
- self.assertEqual('qux', cfg['baz'])
- self.assertEqual('qux2', cfg['bar'])
- self.assertEqual('quxC', cfg['foo'])
+ self.assertIn("vendor_data", cfg)
+ self.assertIn("vendor_data2", cfg)
+ # Confirm that vendordata2 overrides vendordata, and that
+ # userdata overrides both
+ self.assertEqual("qux", cfg["baz"])
+ self.assertEqual("qux2", cfg["bar"])
+ self.assertEqual("qux3", cfg["foobar"])
+ self.assertEqual("quxC", cfg["foo"])
+ self.assertEqual("quxD", cfg["corge"])
+ self.assertEqual("quxFF", cfg["grault"])
def test_simple_jsonp_no_vendor_consumed(self):
# make sure that vendor data is not consumed
- user_blob = '''
+ user_blob = """
#cloud-config-jsonp
[
{ "op": "add", "path": "/baz", "value": "qux" },
{ "op": "add", "path": "/bar", "value": "qux2" },
{ "op": "add", "path": "/vendor_data", "value": {"enabled": "false"}}
]
-'''
- vendor_blob = '''
+"""
+ vendor_blob = """
#cloud-config-jsonp
[
{ "op": "add", "path": "/baz", "value": "quxA" },
{ "op": "add", "path": "/bar", "value": "quxB" },
{ "op": "add", "path": "/foo", "value": "quxC" }
]
-'''
+"""
self.reRoot()
initer = stages.Init()
initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
@@ -168,35 +181,37 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase):
initer.fetch()
initer.instancify()
initer.update()
- initer.cloudify().run('consume_data',
- initer.consume_data,
- args=[PER_INSTANCE],
- freq=PER_INSTANCE)
+ initer.cloudify().run(
+ "consume_data",
+ initer.consume_data,
+ args=[PER_INSTANCE],
+ freq=PER_INSTANCE,
+ )
mods = stages.Modules(initer)
- (_which_ran, _failures) = mods.run_section('cloud_init_modules')
+ (_which_ran, _failures) = mods.run_section("cloud_init_modules")
cfg = mods.cfg
- self.assertEqual('qux', cfg['baz'])
- self.assertEqual('qux2', cfg['bar'])
- self.assertNotIn('foo', cfg)
+ self.assertEqual("qux", cfg["baz"])
+ self.assertEqual("qux2", cfg["bar"])
+ self.assertNotIn("foo", cfg)
def test_mixed_cloud_config(self):
- blob_cc = '''
+ blob_cc = """
#cloud-config
a: b
c: d
-'''
+"""
message_cc = MIMEBase("text", "cloud-config")
message_cc.set_payload(blob_cc)
- blob_jp = '''
+ blob_jp = """
#cloud-config-jsonp
[
{ "op": "replace", "path": "/a", "value": "c" },
{ "op": "remove", "path": "/c" }
]
-'''
+"""
- message_jp = MIMEBase('text', "cloud-config-jsonp")
+ message_jp = MIMEBase("text", "cloud-config-jsonp")
message_jp.set_payload(blob_jp)
message = MIMEMultipart()
@@ -211,26 +226,26 @@ c: d
cc_contents = util.load_file(ci.paths.get_ipath("cloud_config"))
cc = util.load_yaml(cc_contents)
self.assertEqual(1, len(cc))
- self.assertEqual('c', cc['a'])
+ self.assertEqual("c", cc["a"])
def test_cloud_config_as_x_shell_script(self):
- blob_cc = '''
+ blob_cc = """
#cloud-config
a: b
c: d
-'''
+"""
message_cc = MIMEBase("text", "x-shellscript")
message_cc.set_payload(blob_cc)
- blob_jp = '''
+ blob_jp = """
#cloud-config-jsonp
[
{ "op": "replace", "path": "/a", "value": "c" },
{ "op": "remove", "path": "/c" }
]
-'''
+"""
- message_jp = MIMEBase('text', "cloud-config-jsonp")
+ message_jp = MIMEBase("text", "cloud-config-jsonp")
message_jp.set_payload(blob_jp)
message = MIMEMultipart()
@@ -245,19 +260,19 @@ c: d
cc_contents = util.load_file(ci.paths.get_ipath("cloud_config"))
cc = util.load_yaml(cc_contents)
self.assertEqual(1, len(cc))
- self.assertEqual('c', cc['a'])
+ self.assertEqual("c", cc["a"])
def test_vendor_user_yaml_cloud_config(self):
- vendor_blob = '''
+ vendor_blob = """
#cloud-config
a: b
name: vendor
run:
- x
- y
-'''
+"""
- user_blob = '''
+ user_blob = """
#cloud-config
a: c
vendor_data:
@@ -266,7 +281,7 @@ vendor_data:
name: user
run:
- z
-'''
+"""
self.reRoot()
initer = stages.Init()
initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
@@ -275,108 +290,122 @@ run:
initer.fetch()
initer.instancify()
initer.update()
- initer.cloudify().run('consume_data',
- initer.consume_data,
- args=[PER_INSTANCE],
- freq=PER_INSTANCE)
+ initer.cloudify().run(
+ "consume_data",
+ initer.consume_data,
+ args=[PER_INSTANCE],
+ freq=PER_INSTANCE,
+ )
mods = stages.Modules(initer)
- (_which_ran, _failures) = mods.run_section('cloud_init_modules')
+ (_which_ran, _failures) = mods.run_section("cloud_init_modules")
cfg = mods.cfg
- self.assertIn('vendor_data', cfg)
- self.assertEqual('c', cfg['a'])
- self.assertEqual('user', cfg['name'])
- self.assertNotIn('x', cfg['run'])
- self.assertNotIn('y', cfg['run'])
- self.assertIn('z', cfg['run'])
+ self.assertIn("vendor_data", cfg)
+ self.assertEqual("c", cfg["a"])
+ self.assertEqual("user", cfg["name"])
+ self.assertNotIn("x", cfg["run"])
+ self.assertNotIn("y", cfg["run"])
+ self.assertIn("z", cfg["run"])
def test_vendordata_script(self):
- vendor_blob = '''
+ vendor_blob = """
#!/bin/bash
echo "test"
-'''
+"""
+ vendor2_blob = """
+#!/bin/bash
+echo "dynamic test"
+"""
- user_blob = '''
+ user_blob = """
#cloud-config
vendor_data:
enabled: True
prefix: /bin/true
-'''
+"""
new_root = self.reRoot()
initer = stages.Init()
- initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
+ initer.datasource = FakeDataSource(
+ user_blob, vendordata=vendor_blob, vendordata2=vendor2_blob
+ )
initer.read_cfg()
initer.initialize()
initer.fetch()
initer.instancify()
initer.update()
- initer.cloudify().run('consume_data',
- initer.consume_data,
- args=[PER_INSTANCE],
- freq=PER_INSTANCE)
+ initer.cloudify().run(
+ "consume_data",
+ initer.consume_data,
+ args=[PER_INSTANCE],
+ freq=PER_INSTANCE,
+ )
mods = stages.Modules(initer)
- (_which_ran, _failures) = mods.run_section('cloud_init_modules')
- vendor_script = initer.paths.get_ipath_cur('vendor_scripts')
+ (_which_ran, _failures) = mods.run_section("cloud_init_modules")
+ vendor_script = initer.paths.get_ipath_cur("vendor_scripts")
vendor_script_fns = "%s%s/part-001" % (new_root, vendor_script)
self.assertTrue(os.path.exists(vendor_script_fns))
def test_merging_cloud_config(self):
- blob = '''
+ blob = """
#cloud-config
a: b
e: f
run:
- b
- c
-'''
+"""
message1 = MIMEBase("text", "cloud-config")
message1.set_payload(blob)
- blob2 = '''
+ blob2 = """
#cloud-config
a: e
e: g
run:
- stuff
- morestuff
-'''
+"""
message2 = MIMEBase("text", "cloud-config")
- message2['X-Merge-Type'] = ('dict(recurse_array,'
- 'recurse_str)+list(append)+str(append)')
+ message2[
+ "X-Merge-Type"
+ ] = "dict(recurse_array,recurse_str)+list(append)+str(append)"
message2.set_payload(blob2)
- blob3 = '''
+ blob3 = """
#cloud-config
e:
- 1
- 2
- 3
p: 1
-'''
+"""
message3 = MIMEBase("text", "cloud-config")
message3.set_payload(blob3)
messages = [message1, message2, message3]
- paths = c_helpers.Paths({}, ds=FakeDataSource(''))
+ paths = c_helpers.Paths({}, ds=FakeDataSource(""))
cloud_cfg = handlers.cloud_config.CloudConfigPartHandler(paths)
self.reRoot()
- cloud_cfg.handle_part(None, handlers.CONTENT_START, None, None, None,
- None)
+ cloud_cfg.handle_part(
+ None, handlers.CONTENT_START, None, None, None, None
+ )
for i, m in enumerate(messages):
headers = dict(m)
fn = "part-%s" % (i + 1)
payload = m.get_payload(decode=True)
- cloud_cfg.handle_part(None, headers['Content-Type'],
- fn, payload, None, headers)
- cloud_cfg.handle_part(None, handlers.CONTENT_END, None, None, None,
- None)
- contents = util.load_file(paths.get_ipath('cloud_config'))
+ cloud_cfg.handle_part(
+ None, headers["Content-Type"], fn, payload, None, headers
+ )
+ cloud_cfg.handle_part(
+ None, handlers.CONTENT_END, None, None, None, None
+ )
+ contents = util.load_file(paths.get_ipath("cloud_config"))
contents = util.load_yaml(contents)
- self.assertEqual(contents['run'], ['b', 'c', 'stuff', 'morestuff'])
- self.assertEqual(contents['a'], 'be')
- self.assertEqual(contents['e'], [1, 2, 3])
- self.assertEqual(contents['p'], 1)
+ self.assertEqual(contents["run"], ["b", "c", "stuff", "morestuff"])
+ self.assertEqual(contents["a"], "be")
+ self.assertEqual(contents["e"], [1, 2, 3])
+ self.assertEqual(contents["p"], 1)
def test_unhandled_type_warning(self):
"""Raw text without magic is ignored but shows warning."""
@@ -385,35 +414,37 @@ p: 1
data = "arbitrary text\n"
ci.datasource = FakeDataSource(data)
- with mock.patch('cloudinit.util.write_file') as mockobj:
+ with mock.patch("cloudinit.util.write_file") as mockobj:
log_file = self.capture_log(logging.WARNING)
ci.fetch()
ci.consume_data()
self.assertIn(
"Unhandled non-multipart (text/x-not-multipart) userdata:",
- log_file.getvalue())
+ log_file.getvalue(),
+ )
mockobj.assert_called_once_with(
- ci.paths.get_ipath("cloud_config"), "", 0o600)
+ ci.paths.get_ipath("cloud_config"), "", 0o600
+ )
def test_mime_gzip_compressed(self):
"""Tests that individual message gzip encoding works."""
def gzip_part(text):
- return MIMEApplication(gzip_text(text), 'gzip')
+ return MIMEApplication(gzip_text(text), "gzip")
- base_content1 = '''
+ base_content1 = """
#cloud-config
a: 2
-'''
+"""
- base_content2 = '''
+ base_content2 = """
#cloud-config
b: 3
c: 4
-'''
+"""
- message = MIMEMultipart('test')
+ message = MIMEMultipart("test")
message.attach(gzip_part(base_content1))
message.attach(gzip_part(base_content2))
ci = stages.Init()
@@ -425,9 +456,9 @@ c: 4
contents = util.load_yaml(contents)
self.assertTrue(isinstance(contents, dict))
self.assertEqual(3, len(contents))
- self.assertEqual(2, contents['a'])
- self.assertEqual(3, contents['b'])
- self.assertEqual(4, contents['c'])
+ self.assertEqual(2, contents["a"])
+ self.assertEqual(3, contents["b"])
+ self.assertEqual(4, contents["c"])
def test_mime_text_plain(self):
"""Mime message of type text/plain is ignored but shows warning."""
@@ -437,15 +468,17 @@ c: 4
message.set_payload("Just text")
ci.datasource = FakeDataSource(message.as_string().encode())
- with mock.patch('cloudinit.util.write_file') as mockobj:
+ with mock.patch("cloudinit.util.write_file") as mockobj:
log_file = self.capture_log(logging.WARNING)
ci.fetch()
ci.consume_data()
self.assertIn(
"Unhandled unknown content-type (text/plain)",
- log_file.getvalue())
+ log_file.getvalue(),
+ )
mockobj.assert_called_once_with(
- ci.paths.get_ipath("cloud_config"), "", 0o600)
+ ci.paths.get_ipath("cloud_config"), "", 0o600
+ )
def test_shellscript(self):
"""Raw text starting #!/bin/sh is treated as script."""
@@ -456,15 +489,18 @@ c: 4
outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001")
- with mock.patch('cloudinit.util.write_file') as mockobj:
+ with mock.patch("cloudinit.util.write_file") as mockobj:
log_file = self.capture_log(logging.WARNING)
ci.fetch()
ci.consume_data()
self.assertEqual("", log_file.getvalue())
- mockobj.assert_has_calls([
- mock.call(outpath, script, 0o700),
- mock.call(ci.paths.get_ipath("cloud_config"), "", 0o600)])
+ mockobj.assert_has_calls(
+ [
+ mock.call(outpath, script, 0o700),
+ mock.call(ci.paths.get_ipath("cloud_config"), "", 0o600),
+ ]
+ )
def test_mime_text_x_shellscript(self):
"""Mime message of type text/x-shellscript is treated as script."""
@@ -477,15 +513,18 @@ c: 4
outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001")
- with mock.patch('cloudinit.util.write_file') as mockobj:
+ with mock.patch("cloudinit.util.write_file") as mockobj:
log_file = self.capture_log(logging.WARNING)
ci.fetch()
ci.consume_data()
self.assertEqual("", log_file.getvalue())
- mockobj.assert_has_calls([
- mock.call(outpath, script, 0o700),
- mock.call(ci.paths.get_ipath("cloud_config"), "", 0o600)])
+ mockobj.assert_has_calls(
+ [
+ mock.call(outpath, script, 0o700),
+ mock.call(ci.paths.get_ipath("cloud_config"), "", 0o600),
+ ]
+ )
def test_mime_text_plain_shell(self):
"""Mime type text/plain starting #!/bin/sh is treated as script."""
@@ -498,41 +537,48 @@ c: 4
outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001")
- with mock.patch('cloudinit.util.write_file') as mockobj:
+ with mock.patch("cloudinit.util.write_file") as mockobj:
log_file = self.capture_log(logging.WARNING)
ci.fetch()
ci.consume_data()
self.assertEqual("", log_file.getvalue())
- mockobj.assert_has_calls([
- mock.call(outpath, script, 0o700),
- mock.call(ci.paths.get_ipath("cloud_config"), "", 0o600)])
+ mockobj.assert_has_calls(
+ [
+ mock.call(outpath, script, 0o700),
+ mock.call(ci.paths.get_ipath("cloud_config"), "", 0o600),
+ ]
+ )
def test_mime_application_octet_stream(self):
"""Mime type application/octet-stream is ignored but shows warning."""
self.reRoot()
ci = stages.Init()
message = MIMEBase("application", "octet-stream")
- message.set_payload(b'\xbf\xe6\xb2\xc3\xd3\xba\x13\xa4\xd8\xa1\xcc')
+ message.set_payload(b"\xbf\xe6\xb2\xc3\xd3\xba\x13\xa4\xd8\xa1\xcc")
encoders.encode_base64(message)
ci.datasource = FakeDataSource(message.as_string().encode())
- with mock.patch('cloudinit.util.write_file') as mockobj:
+ with mock.patch("cloudinit.util.write_file") as mockobj:
log_file = self.capture_log(logging.WARNING)
ci.fetch()
ci.consume_data()
self.assertIn(
"Unhandled unknown content-type (application/octet-stream)",
- log_file.getvalue())
+ log_file.getvalue(),
+ )
mockobj.assert_called_once_with(
- ci.paths.get_ipath("cloud_config"), "", 0o600)
+ ci.paths.get_ipath("cloud_config"), "", 0o600
+ )
def test_cloud_config_archive(self):
- non_decodable = b'\x11\xc9\xb4gTH\xee\x12'
- data = [{'content': '#cloud-config\npassword: gocubs\n'},
- {'content': '#cloud-config\nlocale: chicago\n'},
- {'content': non_decodable}]
- message = b'#cloud-config-archive\n' + safeyaml.dumps(data).encode()
+ non_decodable = b"\x11\xc9\xb4gTH\xee\x12"
+ data = [
+ {"content": "#cloud-config\npassword: gocubs\n"},
+ {"content": "#cloud-config\nlocale: chicago\n"},
+ {"content": non_decodable},
+ ]
+ message = b"#cloud-config-archive\n" + safeyaml.dumps(data).encode()
self.reRoot()
ci = stages.Init()
@@ -545,35 +591,35 @@ c: 4
# consuming the user-data provided should write 'cloud_config' file
# which will have our yaml in it.
- with mock.patch('cloudinit.util.write_file') as mockobj:
+ with mock.patch("cloudinit.util.write_file") as mockobj:
mockobj.side_effect = fsstore
ci.fetch()
ci.consume_data()
cfg = util.load_yaml(fs[ci.paths.get_ipath("cloud_config")])
- self.assertEqual(cfg.get('password'), 'gocubs')
- self.assertEqual(cfg.get('locale'), 'chicago')
+ self.assertEqual(cfg.get("password"), "gocubs")
+ self.assertEqual(cfg.get("locale"), "chicago")
- @mock.patch('cloudinit.util.read_conf_with_confd')
+ @mock.patch("cloudinit.util.read_conf_with_confd")
def test_dont_allow_user_data(self, mock_cfg):
mock_cfg.return_value = {"allow_userdata": False}
# test that user-data is ignored but vendor-data is kept
- user_blob = '''
+ user_blob = """
#cloud-config-jsonp
[
{ "op": "add", "path": "/baz", "value": "qux" },
{ "op": "add", "path": "/bar", "value": "qux2" }
]
-'''
- vendor_blob = '''
+"""
+ vendor_blob = """
#cloud-config-jsonp
[
{ "op": "add", "path": "/baz", "value": "quxA" },
{ "op": "add", "path": "/bar", "value": "quxB" },
{ "op": "add", "path": "/foo", "value": "quxC" }
]
-'''
+"""
self.reRoot()
initer = stages.Init()
initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
@@ -582,21 +628,22 @@ c: 4
initer.fetch()
initer.instancify()
initer.update()
- initer.cloudify().run('consume_data',
- initer.consume_data,
- args=[PER_INSTANCE],
- freq=PER_INSTANCE)
+ initer.cloudify().run(
+ "consume_data",
+ initer.consume_data,
+ args=[PER_INSTANCE],
+ freq=PER_INSTANCE,
+ )
mods = stages.Modules(initer)
- (_which_ran, _failures) = mods.run_section('cloud_init_modules')
+ (_which_ran, _failures) = mods.run_section("cloud_init_modules")
cfg = mods.cfg
- self.assertIn('vendor_data', cfg)
- self.assertEqual('quxA', cfg['baz'])
- self.assertEqual('quxB', cfg['bar'])
- self.assertEqual('quxC', cfg['foo'])
+ self.assertIn("vendor_data", cfg)
+ self.assertEqual("quxA", cfg["baz"])
+ self.assertEqual("quxB", cfg["bar"])
+ self.assertEqual("quxC", cfg["foo"])
class TestConsumeUserDataHttp(TestConsumeUserData, helpers.HttprettyTestCase):
-
def setUp(self):
TestConsumeUserData.setUp(self)
helpers.HttprettyTestCase.setUp(self)
@@ -605,14 +652,14 @@ class TestConsumeUserDataHttp(TestConsumeUserData, helpers.HttprettyTestCase):
TestConsumeUserData.tearDown(self)
helpers.HttprettyTestCase.tearDown(self)
- @mock.patch('cloudinit.url_helper.time.sleep')
+ @mock.patch("cloudinit.url_helper.time.sleep")
def test_include(self, mock_sleep):
"""Test #include."""
- included_url = 'http://hostname/path'
- included_data = '#cloud-config\nincluded: true\n'
+ included_url = "http://hostname/path"
+ included_data = "#cloud-config\nincluded: true\n"
httpretty.register_uri(httpretty.GET, included_url, included_data)
- blob = '#include\n%s\n' % included_url
+ blob = "#include\n%s\n" % included_url
self.reRoot()
ci = stages.Init()
@@ -621,20 +668,20 @@ class TestConsumeUserDataHttp(TestConsumeUserData, helpers.HttprettyTestCase):
ci.consume_data()
cc_contents = util.load_file(ci.paths.get_ipath("cloud_config"))
cc = util.load_yaml(cc_contents)
- self.assertTrue(cc.get('included'))
+ self.assertTrue(cc.get("included"))
- @mock.patch('cloudinit.url_helper.time.sleep')
+ @mock.patch("cloudinit.url_helper.time.sleep")
def test_include_bad_url(self, mock_sleep):
"""Test #include with a bad URL."""
- bad_url = 'http://bad/forbidden'
- bad_data = '#cloud-config\nbad: true\n'
+ bad_url = "http://bad/forbidden"
+ bad_data = "#cloud-config\nbad: true\n"
httpretty.register_uri(httpretty.GET, bad_url, bad_data, status=403)
- included_url = 'http://hostname/path'
- included_data = '#cloud-config\nincluded: true\n'
+ included_url = "http://hostname/path"
+ included_data = "#cloud-config\nincluded: true\n"
httpretty.register_uri(httpretty.GET, included_url, included_data)
- blob = '#include\n%s\n%s' % (bad_url, included_url)
+ blob = "#include\n%s\n%s" % (bad_url, included_url)
self.reRoot()
ci = stages.Init()
@@ -642,26 +689,26 @@ class TestConsumeUserDataHttp(TestConsumeUserData, helpers.HttprettyTestCase):
ci.fetch()
with self.assertRaises(Exception) as context:
ci.consume_data()
- self.assertIn('403', str(context.exception))
+ self.assertIn("403", str(context.exception))
with self.assertRaises(FileNotFoundError):
util.load_file(ci.paths.get_ipath("cloud_config"))
- @mock.patch('cloudinit.url_helper.time.sleep')
+ @mock.patch("cloudinit.url_helper.time.sleep")
@mock.patch(
"cloudinit.user_data.features.ERROR_ON_USER_DATA_FAILURE", False
)
def test_include_bad_url_no_fail(self, mock_sleep):
"""Test #include with a bad URL and failure disabled"""
- bad_url = 'http://bad/forbidden'
- bad_data = '#cloud-config\nbad: true\n'
+ bad_url = "http://bad/forbidden"
+ bad_data = "#cloud-config\nbad: true\n"
httpretty.register_uri(httpretty.GET, bad_url, bad_data, status=403)
- included_url = 'http://hostname/path'
- included_data = '#cloud-config\nincluded: true\n'
+ included_url = "http://hostname/path"
+ included_data = "#cloud-config\nincluded: true\n"
httpretty.register_uri(httpretty.GET, included_url, included_data)
- blob = '#include\n%s\n%s' % (bad_url, included_url)
+ blob = "#include\n%s\n%s" % (bad_url, included_url)
self.reRoot()
ci = stages.Init()
@@ -670,32 +717,33 @@ class TestConsumeUserDataHttp(TestConsumeUserData, helpers.HttprettyTestCase):
ci.fetch()
ci.consume_data()
- self.assertIn("403 Client Error: Forbidden for url: %s" % bad_url,
- log_file.getvalue())
+ self.assertIn(
+ "403 Client Error: Forbidden for url: %s" % bad_url,
+ log_file.getvalue(),
+ )
cc_contents = util.load_file(ci.paths.get_ipath("cloud_config"))
cc = util.load_yaml(cc_contents)
- self.assertIsNone(cc.get('bad'))
- self.assertTrue(cc.get('included'))
+ self.assertIsNone(cc.get("bad"))
+ self.assertTrue(cc.get("included"))
class TestUDProcess(helpers.ResourceUsingTestCase):
-
def test_bytes_in_userdata(self):
- msg = b'#cloud-config\napt_update: True\n'
+ msg = b"#cloud-config\napt_update: True\n"
ud_proc = ud.UserDataProcessor(self.getCloudPaths())
message = ud_proc.process(msg)
self.assertTrue(count_messages(message) == 1)
def test_string_in_userdata(self):
- msg = '#cloud-config\napt_update: True\n'
+ msg = "#cloud-config\napt_update: True\n"
ud_proc = ud.UserDataProcessor(self.getCloudPaths())
message = ud_proc.process(msg)
self.assertTrue(count_messages(message) == 1)
def test_compressed_in_userdata(self):
- msg = gzip_text('#cloud-config\napt_update: True\n')
+ msg = gzip_text("#cloud-config\napt_update: True\n")
ud_proc = ud.UserDataProcessor(self.getCloudPaths())
message = ud_proc.process(msg)
@@ -703,15 +751,14 @@ class TestUDProcess(helpers.ResourceUsingTestCase):
class TestConvertString(helpers.TestCase):
-
def test_handles_binary_non_utf8_decodable(self):
"""Printable unicode (not utf8-decodable) is safely converted."""
- blob = b'#!/bin/bash\necho \xc3\x84\n'
+ blob = b"#!/bin/bash\necho \xc3\x84\n"
msg = ud.convert_string(blob)
self.assertEqual(blob, msg.get_payload(decode=True))
def test_handles_binary_utf8_decodable(self):
- blob = b'\x32\x32'
+ blob = b"\x32\x32"
msg = ud.convert_string(blob)
self.assertEqual(blob, msg.get_payload(decode=True))
@@ -731,24 +778,31 @@ class TestConvertString(helpers.TestCase):
class TestFetchBaseConfig(helpers.TestCase):
def test_only_builtin_gets_builtin(self):
ret = helpers.wrap_and_call(
- 'cloudinit.stages',
- {'util.read_conf_with_confd': None,
- 'util.read_conf_from_cmdline': None,
- 'read_runtime_config': {'return_value': {}}},
- stages.fetch_base_config)
+ "cloudinit.stages",
+ {
+ "util.read_conf_with_confd": None,
+ "util.read_conf_from_cmdline": None,
+ "read_runtime_config": {"return_value": {}},
+ },
+ stages.fetch_base_config,
+ )
self.assertEqual(util.get_builtin_cfg(), ret)
def test_conf_d_overrides_defaults(self):
builtin = util.get_builtin_cfg()
test_key = sorted(builtin)[0]
- test_value = 'test'
+ test_value = "test"
ret = helpers.wrap_and_call(
- 'cloudinit.stages',
- {'util.read_conf_with_confd':
- {'return_value': {test_key: test_value}},
- 'util.read_conf_from_cmdline': None,
- 'read_runtime_config': {'return_value': {}}},
- stages.fetch_base_config)
+ "cloudinit.stages",
+ {
+ "util.read_conf_with_confd": {
+ "return_value": {test_key: test_value}
+ },
+ "util.read_conf_from_cmdline": None,
+ "read_runtime_config": {"return_value": {}},
+ },
+ stages.fetch_base_config,
+ )
self.assertEqual(ret.get(test_key), test_value)
builtin[test_key] = test_value
self.assertEqual(ret, builtin)
@@ -756,47 +810,64 @@ class TestFetchBaseConfig(helpers.TestCase):
def test_cmdline_overrides_defaults(self):
builtin = util.get_builtin_cfg()
test_key = sorted(builtin)[0]
- test_value = 'test'
+ test_value = "test"
cmdline = {test_key: test_value}
ret = helpers.wrap_and_call(
- 'cloudinit.stages',
- {'util.read_conf_from_cmdline': {'return_value': cmdline},
- 'util.read_conf_with_confd': None,
- 'read_runtime_config': None},
- stages.fetch_base_config)
+ "cloudinit.stages",
+ {
+ "util.read_conf_from_cmdline": {"return_value": cmdline},
+ "util.read_conf_with_confd": None,
+ "read_runtime_config": None,
+ },
+ stages.fetch_base_config,
+ )
self.assertEqual(ret.get(test_key), test_value)
builtin[test_key] = test_value
self.assertEqual(ret, builtin)
def test_cmdline_overrides_confd_runtime_and_defaults(self):
- builtin = {'key1': 'value0', 'key3': 'other2'}
- conf_d = {'key1': 'value1', 'key2': 'other1'}
- cmdline = {'key3': 'other3', 'key2': 'other2'}
- runtime = {'key3': 'runtime3'}
+ builtin = {"key1": "value0", "key3": "other2"}
+ conf_d = {"key1": "value1", "key2": "other1"}
+ cmdline = {"key3": "other3", "key2": "other2"}
+ runtime = {"key3": "runtime3"}
ret = helpers.wrap_and_call(
- 'cloudinit.stages',
- {'util.read_conf_with_confd': {'return_value': conf_d},
- 'util.get_builtin_cfg': {'return_value': builtin},
- 'read_runtime_config': {'return_value': runtime},
- 'util.read_conf_from_cmdline': {'return_value': cmdline}},
- stages.fetch_base_config)
- self.assertEqual(ret, {'key1': 'value1', 'key2': 'other2',
- 'key3': 'other3'})
+ "cloudinit.stages",
+ {
+ "util.read_conf_with_confd": {"return_value": conf_d},
+ "util.get_builtin_cfg": {"return_value": builtin},
+ "read_runtime_config": {"return_value": runtime},
+ "util.read_conf_from_cmdline": {"return_value": cmdline},
+ },
+ stages.fetch_base_config,
+ )
+ self.assertEqual(
+ ret, {"key1": "value1", "key2": "other2", "key3": "other3"}
+ )
def test_order_precedence_is_builtin_system_runtime_cmdline(self):
- builtin = {'key1': 'builtin0', 'key3': 'builtin3'}
- conf_d = {'key1': 'confd1', 'key2': 'confd2', 'keyconfd1': 'kconfd1'}
- runtime = {'key1': 'runtime1', 'key2': 'runtime2'}
- cmdline = {'key1': 'cmdline1'}
+ builtin = {"key1": "builtin0", "key3": "builtin3"}
+ conf_d = {"key1": "confd1", "key2": "confd2", "keyconfd1": "kconfd1"}
+ runtime = {"key1": "runtime1", "key2": "runtime2"}
+ cmdline = {"key1": "cmdline1"}
ret = helpers.wrap_and_call(
- 'cloudinit.stages',
- {'util.read_conf_with_confd': {'return_value': conf_d},
- 'util.get_builtin_cfg': {'return_value': builtin},
- 'util.read_conf_from_cmdline': {'return_value': cmdline},
- 'read_runtime_config': {'return_value': runtime},
- },
- stages.fetch_base_config)
- self.assertEqual(ret, {'key1': 'cmdline1', 'key2': 'runtime2',
- 'key3': 'builtin3', 'keyconfd1': 'kconfd1'})
+ "cloudinit.stages",
+ {
+ "util.read_conf_with_confd": {"return_value": conf_d},
+ "util.get_builtin_cfg": {"return_value": builtin},
+ "util.read_conf_from_cmdline": {"return_value": cmdline},
+ "read_runtime_config": {"return_value": runtime},
+ },
+ stages.fetch_base_config,
+ )
+ self.assertEqual(
+ ret,
+ {
+ "key1": "cmdline1",
+ "key2": "runtime2",
+ "key3": "builtin3",
+ "keyconfd1": "kconfd1",
+ },
+ )
+
# vi: ts=4 expandtab
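
test_merging_cloud_config above relies on the X-Merge-Type part header to control how successive #cloud-config parts are merged. A small sketch, using only the stdlib email package, of assembling such a multipart user-data payload (the helper name is hypothetical):

    from email.mime.base import MIMEBase
    from email.mime.multipart import MIMEMultipart


    def build_merged_user_data(cloud_configs, merge_type=None):
        """Assemble #cloud-config parts into a multipart user-data payload.

        `merge_type`, if given, is attached to every part after the first
        as an X-Merge-Type header, e.g.
        "dict(recurse_array,recurse_str)+list(append)+str(append)".
        """
        message = MIMEMultipart()
        for i, blob in enumerate(cloud_configs):
            part = MIMEBase("text", "cloud-config")
            if merge_type and i > 0:
                part["X-Merge-Type"] = merge_type
            part.set_payload(blob)
            message.attach(part)
        return message.as_string()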
diff --git a/tests/unittests/test_datasource/test_aliyun.py b/tests/unittests/test_datasource/test_aliyun.py
deleted file mode 100644
index eb2828d5..00000000
--- a/tests/unittests/test_datasource/test_aliyun.py
+++ /dev/null
@@ -1,218 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import functools
-import httpretty
-import os
-from unittest import mock
-
-from cloudinit import helpers
-from cloudinit.sources import DataSourceAliYun as ay
-from cloudinit.tests import helpers as test_helpers
-
-DEFAULT_METADATA = {
- 'instance-id': 'aliyun-test-vm-00',
- 'eipv4': '10.0.0.1',
- 'hostname': 'test-hostname',
- 'image-id': 'm-test',
- 'launch-index': '0',
- 'mac': '00:16:3e:00:00:00',
- 'network-type': 'vpc',
- 'private-ipv4': '192.168.0.1',
- 'serial-number': 'test-string',
- 'vpc-cidr-block': '192.168.0.0/16',
- 'vpc-id': 'test-vpc',
- 'vswitch-id': 'test-vpc',
- 'vswitch-cidr-block': '192.168.0.0/16',
- 'zone-id': 'test-zone-1',
- 'ntp-conf': {'ntp_servers': [
- 'ntp1.aliyun.com',
- 'ntp2.aliyun.com',
- 'ntp3.aliyun.com']},
- 'source-address': ['http://mirrors.aliyun.com',
- 'http://mirrors.aliyuncs.com'],
- 'public-keys': {'key-pair-1': {'openssh-key': 'ssh-rsa AAAAB3...'},
- 'key-pair-2': {'openssh-key': 'ssh-rsa AAAAB3...'}}
-}
-
-DEFAULT_USERDATA = """\
-#cloud-config
-
-hostname: localhost"""
-
-
-def register_mock_metaserver(base_url, data):
- def register_helper(register, base_url, body):
- if isinstance(body, str):
- register(base_url, body)
- elif isinstance(body, list):
- register(base_url.rstrip('/'), '\n'.join(body) + '\n')
- elif isinstance(body, dict):
- if not body:
- register(base_url.rstrip('/') + '/', 'not found',
- status_code=404)
- vals = []
- for k, v in body.items():
- if isinstance(v, (str, list)):
- suffix = k.rstrip('/')
- else:
- suffix = k.rstrip('/') + '/'
- vals.append(suffix)
- url = base_url.rstrip('/') + '/' + suffix
- register_helper(register, url, v)
- register(base_url, '\n'.join(vals) + '\n')
-
- register = functools.partial(httpretty.register_uri, httpretty.GET)
- register_helper(register, base_url, data)
-
-
-class TestAliYunDatasource(test_helpers.HttprettyTestCase):
- def setUp(self):
- super(TestAliYunDatasource, self).setUp()
- cfg = {'datasource': {'AliYun': {'timeout': '1', 'max_wait': '1'}}}
- distro = {}
- paths = helpers.Paths({'run_dir': self.tmp_dir()})
- self.ds = ay.DataSourceAliYun(cfg, distro, paths)
- self.metadata_address = self.ds.metadata_urls[0]
-
- @property
- def default_metadata(self):
- return DEFAULT_METADATA
-
- @property
- def default_userdata(self):
- return DEFAULT_USERDATA
-
- @property
- def metadata_url(self):
- return os.path.join(
- self.metadata_address,
- self.ds.min_metadata_version, 'meta-data') + '/'
-
- @property
- def userdata_url(self):
- return os.path.join(
- self.metadata_address,
- self.ds.min_metadata_version, 'user-data')
-
- # EC2 provides an instance-identity document which must return 404 here
- # for this test to pass.
- @property
- def default_identity(self):
- return {}
-
- @property
- def identity_url(self):
- return os.path.join(self.metadata_address,
- self.ds.min_metadata_version,
- 'dynamic', 'instance-identity')
-
- def regist_default_server(self):
- register_mock_metaserver(self.metadata_url, self.default_metadata)
- register_mock_metaserver(self.userdata_url, self.default_userdata)
- register_mock_metaserver(self.identity_url, self.default_identity)
-
- def _test_get_data(self):
- self.assertEqual(self.ds.metadata, self.default_metadata)
- self.assertEqual(self.ds.userdata_raw,
- self.default_userdata.encode('utf8'))
-
- def _test_get_sshkey(self):
- pub_keys = [v['openssh-key'] for (_, v) in
- self.default_metadata['public-keys'].items()]
- self.assertEqual(self.ds.get_public_ssh_keys(), pub_keys)
-
- def _test_get_iid(self):
- self.assertEqual(self.default_metadata['instance-id'],
- self.ds.get_instance_id())
-
- def _test_host_name(self):
- self.assertEqual(self.default_metadata['hostname'],
- self.ds.get_hostname())
-
- @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun")
- def test_with_mock_server(self, m_is_aliyun):
- m_is_aliyun.return_value = True
- self.regist_default_server()
- ret = self.ds.get_data()
- self.assertEqual(True, ret)
- self.assertEqual(1, m_is_aliyun.call_count)
- self._test_get_data()
- self._test_get_sshkey()
- self._test_get_iid()
- self._test_host_name()
- self.assertEqual('aliyun', self.ds.cloud_name)
- self.assertEqual('ec2', self.ds.platform)
- self.assertEqual(
- 'metadata (http://100.100.100.200)', self.ds.subplatform)
-
- @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun")
- def test_returns_false_when_not_on_aliyun(self, m_is_aliyun):
- """If is_aliyun returns false, then get_data should return False."""
- m_is_aliyun.return_value = False
- self.regist_default_server()
- ret = self.ds.get_data()
- self.assertEqual(1, m_is_aliyun.call_count)
- self.assertEqual(False, ret)
-
- def test_parse_public_keys(self):
- public_keys = {}
- self.assertEqual(ay.parse_public_keys(public_keys), [])
-
- public_keys = {'key-pair-0': 'ssh-key-0'}
- self.assertEqual(ay.parse_public_keys(public_keys),
- [public_keys['key-pair-0']])
-
- public_keys = {'key-pair-0': 'ssh-key-0', 'key-pair-1': 'ssh-key-1'}
- self.assertEqual(set(ay.parse_public_keys(public_keys)),
- set([public_keys['key-pair-0'],
- public_keys['key-pair-1']]))
-
- public_keys = {'key-pair-0': ['ssh-key-0', 'ssh-key-1']}
- self.assertEqual(ay.parse_public_keys(public_keys),
- public_keys['key-pair-0'])
-
- public_keys = {'key-pair-0': {'openssh-key': []}}
- self.assertEqual(ay.parse_public_keys(public_keys), [])
-
- public_keys = {'key-pair-0': {'openssh-key': 'ssh-key-0'}}
- self.assertEqual(ay.parse_public_keys(public_keys),
- [public_keys['key-pair-0']['openssh-key']])
-
- public_keys = {'key-pair-0': {'openssh-key': ['ssh-key-0',
- 'ssh-key-1']}}
- self.assertEqual(ay.parse_public_keys(public_keys),
- public_keys['key-pair-0']['openssh-key'])
-
-
-class TestIsAliYun(test_helpers.CiTestCase):
- ALIYUN_PRODUCT = 'Alibaba Cloud ECS'
- read_dmi_data_expected = [mock.call('system-product-name')]
-
- @mock.patch("cloudinit.sources.DataSourceAliYun.dmi.read_dmi_data")
- def test_true_on_aliyun_product(self, m_read_dmi_data):
- """Should return true if the dmi product data has expected value."""
- m_read_dmi_data.return_value = self.ALIYUN_PRODUCT
- ret = ay._is_aliyun()
- self.assertEqual(self.read_dmi_data_expected,
- m_read_dmi_data.call_args_list)
- self.assertEqual(True, ret)
-
- @mock.patch("cloudinit.sources.DataSourceAliYun.dmi.read_dmi_data")
- def test_false_on_empty_string(self, m_read_dmi_data):
- """Should return false on empty value returned."""
- m_read_dmi_data.return_value = ""
- ret = ay._is_aliyun()
- self.assertEqual(self.read_dmi_data_expected,
- m_read_dmi_data.call_args_list)
- self.assertEqual(False, ret)
-
- @mock.patch("cloudinit.sources.DataSourceAliYun.dmi.read_dmi_data")
- def test_false_on_unknown_string(self, m_read_dmi_data):
- """Should return false on an unrelated string."""
- m_read_dmi_data.return_value = "cubs win"
- ret = ay._is_aliyun()
- self.assertEqual(self.read_dmi_data_expected,
- m_read_dmi_data.call_args_list)
- self.assertEqual(False, ret)
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
deleted file mode 100644
index e363c1f9..00000000
--- a/tests/unittests/test_datasource/test_azure.py
+++ /dev/null
@@ -1,2999 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import url_helper
-from cloudinit.sources import (
- UNSET, DataSourceAzure as dsaz, InvalidMetaDataException)
-from cloudinit.util import (b64e, decode_binary, load_file, write_file,
- MountFailedError, json_dumps, load_json)
-from cloudinit.version import version_string as vs
-from cloudinit.tests.helpers import (
- HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call,
- ExitStack, resourceLocation)
-from cloudinit.sources.helpers import netlink
-
-import copy
-import crypt
-import httpretty
-import json
-import os
-import requests
-import stat
-import xml.etree.ElementTree as ET
-import yaml
-
-
-def construct_valid_ovf_env(data=None, pubkeys=None,
- userdata=None, platform_settings=None):
- if data is None:
- data = {'HostName': 'FOOHOST'}
- if pubkeys is None:
- pubkeys = {}
-
- content = """<?xml version="1.0" encoding="utf-8"?>
-<Environment xmlns="http://schemas.dmtf.org/ovf/environment/1"
- xmlns:oe="http://schemas.dmtf.org/ovf/environment/1"
- xmlns:wa="http://schemas.microsoft.com/windowsazure"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-
- <wa:ProvisioningSection><wa:Version>1.0</wa:Version>
- <LinuxProvisioningConfigurationSet
- xmlns="http://schemas.microsoft.com/windowsazure"
- xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
- <ConfigurationSetType>LinuxProvisioningConfiguration</ConfigurationSetType>
- """
- for key, dval in data.items():
- if isinstance(dval, dict):
- val = dict(dval).get('text')
- attrs = ' ' + ' '.join(["%s='%s'" % (k, v) for k, v
- in dict(dval).items() if k != 'text'])
- else:
- val = dval
- attrs = ""
- content += "<%s%s>%s</%s>\n" % (key, attrs, val, key)
-
- if userdata:
- content += "<UserData>%s</UserData>\n" % (b64e(userdata))
-
- if pubkeys:
- content += "<SSH><PublicKeys>\n"
- for fp, path, value in pubkeys:
- content += " <PublicKey>"
- if fp and path:
- content += ("<Fingerprint>%s</Fingerprint><Path>%s</Path>" %
- (fp, path))
- if value:
- content += "<Value>%s</Value>" % value
- content += "</PublicKey>\n"
- content += "</PublicKeys></SSH>"
- content += """
- </LinuxProvisioningConfigurationSet>
- </wa:ProvisioningSection>
- <wa:PlatformSettingsSection><wa:Version>1.0</wa:Version>
- <PlatformSettings xmlns="http://schemas.microsoft.com/windowsazure"
- xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
- <KmsServerHostname>kms.core.windows.net</KmsServerHostname>
- <ProvisionGuestAgent>false</ProvisionGuestAgent>
- <GuestAgentPackageName i:nil="true" />"""
- if platform_settings:
- for k, v in platform_settings.items():
- content += "<%s>%s</%s>\n" % (k, v, k)
- if "PreprovisionedVMType" not in platform_settings:
- content += """<PreprovisionedVMType i:nil="true" />"""
- content += """</PlatformSettings></wa:PlatformSettingsSection>
-</Environment>"""
-
- return content
-
-
-NETWORK_METADATA = {
- "compute": {
- "location": "eastus2",
- "name": "my-hostname",
- "offer": "UbuntuServer",
- "osType": "Linux",
- "placementGroupId": "",
- "platformFaultDomain": "0",
- "platformUpdateDomain": "0",
- "publisher": "Canonical",
- "resourceGroupName": "srugroup1",
- "sku": "19.04-DAILY",
- "subscriptionId": "12aad61c-6de4-4e53-a6c6-5aff52a83777",
- "tags": "",
- "version": "19.04.201906190",
- "vmId": "ff702a6b-cb6a-4fcd-ad68-b4ce38227642",
- "vmScaleSetName": "",
- "vmSize": "Standard_DS1_v2",
- "zone": "",
- "publicKeys": [
- {
- "keyData": "key1",
- "path": "path1"
- }
- ]
- },
- "network": {
- "interface": [
- {
- "macAddress": "000D3A047598",
- "ipv6": {
- "ipAddress": []
- },
- "ipv4": {
- "subnet": [
- {
- "prefix": "24",
- "address": "10.0.0.0"
- }
- ],
- "ipAddress": [
- {
- "privateIpAddress": "10.0.0.4",
- "publicIpAddress": "104.46.124.81"
- }
- ]
- }
- }
- ]
- }
-}
-
-SECONDARY_INTERFACE = {
- "macAddress": "220D3A047598",
- "ipv6": {
- "ipAddress": []
- },
- "ipv4": {
- "subnet": [
- {
- "prefix": "24",
- "address": "10.0.1.0"
- }
- ],
- "ipAddress": [
- {
- "privateIpAddress": "10.0.1.5",
- }
- ]
- }
-}
-
-IMDS_NETWORK_METADATA = {
- "interface": [
- {
- "macAddress": "000D3A047598",
- "ipv6": {
- "ipAddress": []
- },
- "ipv4": {
- "subnet": [
- {
- "prefix": "24",
- "address": "10.0.0.0"
- }
- ],
- "ipAddress": [
- {
- "privateIpAddress": "10.0.0.4",
- "publicIpAddress": "104.46.124.81"
- }
- ]
- }
- }
- ]
-}
-
-MOCKPATH = 'cloudinit.sources.DataSourceAzure.'
-
-
-class TestParseNetworkConfig(CiTestCase):
-
- maxDiff = None
- fallback_config = {
- 'version': 1,
- 'config': [{
- 'type': 'physical', 'name': 'eth0',
- 'mac_address': '00:11:22:33:44:55',
- 'params': {'driver': 'hv_netsvc'},
- 'subnets': [{'type': 'dhcp'}],
- }]
- }
-
- @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
- return_value=None)
- def test_single_ipv4_nic_configuration(self, m_driver):
- """parse_network_config emits dhcp on single nic with ipv4"""
- expected = {'ethernets': {
- 'eth0': {'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 100},
- 'dhcp6': False,
- 'match': {'macaddress': '00:0d:3a:04:75:98'},
- 'set-name': 'eth0'}}, 'version': 2}
- self.assertEqual(expected, dsaz.parse_network_config(NETWORK_METADATA))
-
- @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
- return_value=None)
- def test_increases_route_metric_for_non_primary_nics(self, m_driver):
- """parse_network_config increases route-metric for each nic"""
- expected = {'ethernets': {
- 'eth0': {'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 100},
- 'dhcp6': False,
- 'match': {'macaddress': '00:0d:3a:04:75:98'},
- 'set-name': 'eth0'},
- 'eth1': {'set-name': 'eth1',
- 'match': {'macaddress': '22:0d:3a:04:75:98'},
- 'dhcp6': False,
- 'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 200}},
- 'eth2': {'set-name': 'eth2',
- 'match': {'macaddress': '33:0d:3a:04:75:98'},
- 'dhcp6': False,
- 'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 300}}}, 'version': 2}
- imds_data = copy.deepcopy(NETWORK_METADATA)
- imds_data['network']['interface'].append(SECONDARY_INTERFACE)
- third_intf = copy.deepcopy(SECONDARY_INTERFACE)
- third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33')
- third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0'
- third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6'
- imds_data['network']['interface'].append(third_intf)
- self.assertEqual(expected, dsaz.parse_network_config(imds_data))
-
- @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
- return_value=None)
- def test_ipv4_and_ipv6_route_metrics_match_for_nics(self, m_driver):
- """parse_network_config emits matching ipv4 and ipv6 route-metrics."""
- expected = {'ethernets': {
- 'eth0': {'addresses': ['10.0.0.5/24', '2001:dead:beef::2/128'],
- 'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 100},
- 'dhcp6': True,
- 'dhcp6-overrides': {'route-metric': 100},
- 'match': {'macaddress': '00:0d:3a:04:75:98'},
- 'set-name': 'eth0'},
- 'eth1': {'set-name': 'eth1',
- 'match': {'macaddress': '22:0d:3a:04:75:98'},
- 'dhcp4': True,
- 'dhcp6': False,
- 'dhcp4-overrides': {'route-metric': 200}},
- 'eth2': {'set-name': 'eth2',
- 'match': {'macaddress': '33:0d:3a:04:75:98'},
- 'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 300},
- 'dhcp6': True,
- 'dhcp6-overrides': {'route-metric': 300}}}, 'version': 2}
- imds_data = copy.deepcopy(NETWORK_METADATA)
- nic1 = imds_data['network']['interface'][0]
- nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'})
-
- nic1['ipv6'] = {
- "subnet": [{"address": "2001:dead:beef::16"}],
- "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"},
- {"privateIpAddress": "2001:dead:beef::2"}]
- }
- imds_data['network']['interface'].append(SECONDARY_INTERFACE)
- third_intf = copy.deepcopy(SECONDARY_INTERFACE)
- third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33')
- third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0'
- third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6'
- third_intf['ipv6'] = {
- "subnet": [{"prefix": "64", "address": "2001:dead:beef::2"}],
- "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}]
- }
- imds_data['network']['interface'].append(third_intf)
- self.assertEqual(expected, dsaz.parse_network_config(imds_data))
-
- @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
- return_value=None)
- def test_ipv4_secondary_ips_will_be_static_addrs(self, m_driver):
- """parse_network_config emits primary ipv4 as dhcp others are static"""
- expected = {'ethernets': {
- 'eth0': {'addresses': ['10.0.0.5/24'],
- 'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 100},
- 'dhcp6': True,
- 'dhcp6-overrides': {'route-metric': 100},
- 'match': {'macaddress': '00:0d:3a:04:75:98'},
- 'set-name': 'eth0'}}, 'version': 2}
- imds_data = copy.deepcopy(NETWORK_METADATA)
- nic1 = imds_data['network']['interface'][0]
- nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'})
-
- nic1['ipv6'] = {
- "subnet": [{"prefix": "10", "address": "2001:dead:beef::16"}],
- "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}]
- }
- self.assertEqual(expected, dsaz.parse_network_config(imds_data))
-
- @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
- return_value=None)
- def test_ipv6_secondary_ips_will_be_static_cidrs(self, m_driver):
- """parse_network_config emits primary ipv6 as dhcp others are static"""
- expected = {'ethernets': {
- 'eth0': {'addresses': ['10.0.0.5/24', '2001:dead:beef::2/10'],
- 'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 100},
- 'dhcp6': True,
- 'dhcp6-overrides': {'route-metric': 100},
- 'match': {'macaddress': '00:0d:3a:04:75:98'},
- 'set-name': 'eth0'}}, 'version': 2}
- imds_data = copy.deepcopy(NETWORK_METADATA)
- nic1 = imds_data['network']['interface'][0]
- nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'})
-
- # Secondary ipv6 addresses currently ignored/unconfigured
- nic1['ipv6'] = {
- "subnet": [{"prefix": "10", "address": "2001:dead:beef::16"}],
- "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"},
- {"privateIpAddress": "2001:dead:beef::2"}]
- }
- self.assertEqual(expected, dsaz.parse_network_config(imds_data))
-
- @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
- return_value='hv_netvsc')
- def test_match_driver_for_netvsc(self, m_driver):
- """parse_network_config emits driver when using netvsc."""
- expected = {'ethernets': {
- 'eth0': {
- 'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 100},
- 'dhcp6': False,
- 'match': {
- 'macaddress': '00:0d:3a:04:75:98',
- 'driver': 'hv_netvsc',
- },
- 'set-name': 'eth0'
- }}, 'version': 2}
- self.assertEqual(expected, dsaz.parse_network_config(NETWORK_METADATA))
-
- @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
- return_value=None)
- @mock.patch('cloudinit.net.generate_fallback_config')
- def test_parse_network_config_uses_fallback_cfg_when_no_network_metadata(
- self, m_fallback_config, m_driver):
- """parse_network_config generates fallback network config when the
- IMDS instance metadata is corrupted/invalid, such as when
- network metadata is not present.
- """
- imds_metadata_missing_network_metadata = copy.deepcopy(
- NETWORK_METADATA)
- del imds_metadata_missing_network_metadata['network']
- m_fallback_config.return_value = self.fallback_config
- self.assertEqual(
- self.fallback_config,
- dsaz.parse_network_config(
- imds_metadata_missing_network_metadata))
-
- @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
- return_value=None)
- @mock.patch('cloudinit.net.generate_fallback_config')
- def test_parse_network_config_uses_fallback_cfg_when_no_interface_metadata(
- self, m_fallback_config, m_driver):
- """parse_network_config generates fallback network config when the
- IMDS instance metadata is corrupted/invalid, such as when
- network interface metadata is not present.
- """
- imds_metadata_missing_interface_metadata = copy.deepcopy(
- NETWORK_METADATA)
- del imds_metadata_missing_interface_metadata['network']['interface']
- m_fallback_config.return_value = self.fallback_config
- self.assertEqual(
- self.fallback_config,
- dsaz.parse_network_config(
- imds_metadata_missing_interface_metadata))
-
-
-class TestGetMetadataFromIMDS(HttprettyTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestGetMetadataFromIMDS, self).setUp()
- self.network_md_url = dsaz.IMDS_URL + "instance?api-version=2019-06-01"
-
- @mock.patch(MOCKPATH + 'readurl')
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4', autospec=True)
- @mock.patch(MOCKPATH + 'net.is_up', autospec=True)
- def test_get_metadata_does_not_dhcp_if_network_is_up(
- self, m_net_is_up, m_dhcp, m_readurl):
- """Do not perform DHCP setup when nic is already up."""
- m_net_is_up.return_value = True
- m_readurl.return_value = url_helper.StringResponse(
- json.dumps(NETWORK_METADATA).encode('utf-8'))
- self.assertEqual(
- NETWORK_METADATA,
- dsaz.get_metadata_from_imds('eth9', retries=3))
-
- m_net_is_up.assert_called_with('eth9')
- m_dhcp.assert_not_called()
- self.assertIn(
- "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time
- self.logs.getvalue())
-
- @mock.patch(MOCKPATH + 'readurl', autospec=True)
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
- @mock.patch(MOCKPATH + 'net.is_up')
- def test_get_compute_metadata_uses_compute_url(
- self, m_net_is_up, m_dhcp, m_readurl):
- """Make sure readurl is called with the correct url when accessing
- compute metadata"""
- m_net_is_up.return_value = True
- m_readurl.return_value = url_helper.StringResponse(
- json.dumps(IMDS_NETWORK_METADATA).encode('utf-8'))
-
- dsaz.get_metadata_from_imds(
- 'eth0', retries=3, md_type=dsaz.metadata_type.compute)
- m_readurl.assert_called_with(
- "http://169.254.169.254/metadata/instance?api-version="
- "2019-06-01", exception_cb=mock.ANY,
- headers=mock.ANY, retries=mock.ANY,
- timeout=mock.ANY)
-
- @mock.patch(MOCKPATH + 'readurl', autospec=True)
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
- @mock.patch(MOCKPATH + 'net.is_up')
- def test_get_network_metadata_uses_network_url(
- self, m_net_is_up, m_dhcp, m_readurl):
- """Make sure readurl is called with the correct url when accessing
- network metadata"""
- m_net_is_up.return_value = True
- m_readurl.return_value = url_helper.StringResponse(
- json.dumps(IMDS_NETWORK_METADATA).encode('utf-8'))
-
- dsaz.get_metadata_from_imds(
- 'eth0', retries=3, md_type=dsaz.metadata_type.network)
- m_readurl.assert_called_with(
- "http://169.254.169.254/metadata/instance/network?api-version="
- "2019-06-01", exception_cb=mock.ANY,
- headers=mock.ANY, retries=mock.ANY,
- timeout=mock.ANY)
-
- @mock.patch(MOCKPATH + 'readurl', autospec=True)
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
- @mock.patch(MOCKPATH + 'net.is_up')
- def test_get_default_metadata_uses_compute_url(
- self, m_net_is_up, m_dhcp, m_readurl):
- """Make sure readurl is called with the correct url when accessing
- the default (compute) metadata"""
- m_net_is_up.return_value = True
- m_readurl.return_value = url_helper.StringResponse(
- json.dumps(IMDS_NETWORK_METADATA).encode('utf-8'))
-
- dsaz.get_metadata_from_imds(
- 'eth0', retries=3)
- m_readurl.assert_called_with(
- "http://169.254.169.254/metadata/instance?api-version="
- "2019-06-01", exception_cb=mock.ANY,
- headers=mock.ANY, retries=mock.ANY,
- timeout=mock.ANY)
-
- @mock.patch(MOCKPATH + 'readurl', autospec=True)
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting', autospec=True)
- @mock.patch(MOCKPATH + 'net.is_up', autospec=True)
- def test_get_metadata_performs_dhcp_when_network_is_down(
- self, m_net_is_up, m_dhcp, m_readurl):
- """Perform DHCP setup when nic is not up."""
- m_net_is_up.return_value = False
- m_readurl.return_value = url_helper.StringResponse(
- json.dumps(NETWORK_METADATA).encode('utf-8'))
-
- self.assertEqual(
- NETWORK_METADATA,
- dsaz.get_metadata_from_imds('eth9', retries=2))
-
- m_net_is_up.assert_called_with('eth9')
- m_dhcp.assert_called_with(mock.ANY, 'eth9')
- self.assertIn(
- "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time
- self.logs.getvalue())
-
- m_readurl.assert_called_with(
- self.network_md_url, exception_cb=mock.ANY,
- headers={'Metadata': 'true'}, retries=2,
- timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS)
-
- @mock.patch('cloudinit.url_helper.time.sleep')
- @mock.patch(MOCKPATH + 'net.is_up', autospec=True)
- def test_get_metadata_from_imds_empty_when_no_imds_present(
- self, m_net_is_up, m_sleep):
- """Return empty dict when IMDS network metadata is absent."""
- httpretty.register_uri(
- httpretty.GET,
- dsaz.IMDS_URL + 'instance?api-version=2017-12-01',
- body={}, status=404)
-
- m_net_is_up.return_value = True # skips dhcp
-
- self.assertEqual({}, dsaz.get_metadata_from_imds('eth9', retries=2))
-
- m_net_is_up.assert_called_with('eth9')
- self.assertEqual([mock.call(1), mock.call(1)], m_sleep.call_args_list)
- self.assertIn(
- "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time
- self.logs.getvalue())
-
- @mock.patch('requests.Session.request')
- @mock.patch('cloudinit.url_helper.time.sleep')
- @mock.patch(MOCKPATH + 'net.is_up', autospec=True)
- def test_get_metadata_from_imds_retries_on_timeout(
- self, m_net_is_up, m_sleep, m_request):
- """Retry IMDS network metadata on timeout errors."""
-
- self.attempt = 0
- m_request.side_effect = requests.Timeout('Fake Connection Timeout')
-
- def retry_callback(request, uri, headers):
- self.attempt += 1
- raise requests.Timeout('Fake connection timeout')
-
- httpretty.register_uri(
- httpretty.GET,
- dsaz.IMDS_URL + 'instance?api-version=2017-12-01',
- body=retry_callback)
-
- m_net_is_up.return_value = True # skips dhcp
-
- self.assertEqual({}, dsaz.get_metadata_from_imds('eth9', retries=3))
-
- m_net_is_up.assert_called_with('eth9')
- self.assertEqual([mock.call(1)]*3, m_sleep.call_args_list)
- self.assertIn(
- "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time
- self.logs.getvalue())
-
-
-class TestAzureDataSource(CiTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestAzureDataSource, self).setUp()
- self.tmp = self.tmp_dir()
-
- # patch cloud_dir, so our 'seed_dir' is guaranteed empty
- self.paths = helpers.Paths(
- {'cloud_dir': self.tmp, 'run_dir': self.tmp})
- self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent')
-
- self.patches = ExitStack()
- self.addCleanup(self.patches.close)
-
- self.patches.enter_context(mock.patch.object(
- dsaz, '_get_random_seed', return_value='wild'))
- self.m_get_metadata_from_imds = self.patches.enter_context(
- mock.patch.object(
- dsaz, 'get_metadata_from_imds',
- mock.MagicMock(return_value=NETWORK_METADATA)))
- self.m_fallback_nic = self.patches.enter_context(
- mock.patch('cloudinit.sources.net.find_fallback_nic',
- return_value='eth9'))
- self.m_remove_ubuntu_network_scripts = self.patches.enter_context(
- mock.patch.object(
- dsaz, 'maybe_remove_ubuntu_network_config_scripts',
- mock.MagicMock()))
-
- def apply_patches(self, patches):
- for module, name, new in patches:
- self.patches.enter_context(mock.patch.object(module, name, new))
-
- def _get_mockds(self):
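- # Canned FreeBSD sysctl and camcontrol output so that
- # get_resource_disk_on_freebsd() can be exercised without real devices.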
- sysctl_out = "dev.storvsc.3.%pnpinfo: "\
- "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f "\
- "deviceid=f8b3781b-1e82-4818-a1c3-63d806ec15bb\n"
- sysctl_out += "dev.storvsc.2.%pnpinfo: "\
- "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f "\
- "deviceid=f8b3781a-1e82-4818-a1c3-63d806ec15bb\n"
- sysctl_out += "dev.storvsc.1.%pnpinfo: "\
- "classid=32412632-86cb-44a2-9b5c-50d1417354f5 "\
- "deviceid=00000000-0001-8899-0000-000000000000\n"
- camctl_devbus = """
-scbus0 on ata0 bus 0
-scbus1 on ata1 bus 0
-scbus2 on blkvsc0 bus 0
-scbus3 on blkvsc1 bus 0
-scbus4 on storvsc2 bus 0
-scbus5 on storvsc3 bus 0
-scbus-1 on xpt0 bus 0
- """
- camctl_dev = """
-<Msft Virtual CD/ROM 1.0> at scbus1 target 0 lun 0 (cd0,pass0)
-<Msft Virtual Disk 1.0> at scbus2 target 0 lun 0 (da0,pass1)
-<Msft Virtual Disk 1.0> at scbus3 target 1 lun 0 (da1,pass2)
- """
- self.apply_patches([
- (dsaz, 'get_dev_storvsc_sysctl', mock.MagicMock(
- return_value=sysctl_out)),
- (dsaz, 'get_camcontrol_dev_bus', mock.MagicMock(
- return_value=camctl_devbus)),
- (dsaz, 'get_camcontrol_dev', mock.MagicMock(
- return_value=camctl_dev))
- ])
- return dsaz
-
- def _get_ds(self, data, agent_command=None, distro='ubuntu',
- apply_network=None):
-
- def dsdevs():
- return data.get('dsdevs', [])
-
- def _invoke_agent(cmd):
- data['agent_invoked'] = cmd
-
- def _wait_for_files(flist, _maxwait=None, _naplen=None):
- data['waited'] = flist
- return []
-
- def _pubkeys_from_crt_files(flist):
- data['pubkey_files'] = flist
- return ["pubkey_from: %s" % f for f in flist]
-
- if data.get('ovfcontent') is not None:
- populate_dir(os.path.join(self.paths.seed_dir, "azure"),
- {'ovf-env.xml': data['ovfcontent']})
-
- dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
-
- self.m_is_platform_viable = mock.MagicMock(autospec=True)
- self.m_get_metadata_from_fabric = mock.MagicMock(
- return_value={'public-keys': []})
- self.m_report_failure_to_fabric = mock.MagicMock(autospec=True)
- self.m_ephemeral_dhcpv4 = mock.MagicMock()
- self.m_ephemeral_dhcpv4_with_reporting = mock.MagicMock()
-
- self.instance_id = 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8'
-
- def _dmi_mocks(key):
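- # Return the test instance uuid and the well-known Azure chassis
- # asset tag that the datasource uses for platform detection.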
- if key == 'system-uuid':
- return self.instance_id
- elif key == 'chassis-asset-tag':
- return '7783-7084-3265-9085-8269-3286-77'
-
- self.apply_patches([
- (dsaz, 'list_possible_azure_ds_devs', dsdevs),
- (dsaz, 'invoke_agent', _invoke_agent),
- (dsaz, 'pubkeys_from_crt_files', _pubkeys_from_crt_files),
- (dsaz, 'perform_hostname_bounce', mock.MagicMock()),
- (dsaz, 'get_hostname', mock.MagicMock()),
- (dsaz, 'set_hostname', mock.MagicMock()),
- (dsaz, '_is_platform_viable',
- self.m_is_platform_viable),
- (dsaz, 'get_metadata_from_fabric',
- self.m_get_metadata_from_fabric),
- (dsaz, 'report_failure_to_fabric',
- self.m_report_failure_to_fabric),
- (dsaz, 'EphemeralDHCPv4', self.m_ephemeral_dhcpv4),
- (dsaz, 'EphemeralDHCPv4WithReporting',
- self.m_ephemeral_dhcpv4_with_reporting),
- (dsaz, 'get_boot_telemetry', mock.MagicMock()),
- (dsaz, 'get_system_info', mock.MagicMock()),
- (dsaz.subp, 'which', lambda x: True),
- (dsaz.dmi, 'read_dmi_data', mock.MagicMock(
- side_effect=_dmi_mocks)),
- (dsaz.util, 'wait_for_files', mock.MagicMock(
- side_effect=_wait_for_files)),
- ])
-
- if isinstance(distro, str):
- distro_cls = distros.fetch(distro)
- distro = distro_cls(distro, data.get('sys_cfg', {}), self.paths)
- dsrc = dsaz.DataSourceAzure(
- data.get('sys_cfg', {}), distro=distro, paths=self.paths)
- if agent_command is not None:
- dsrc.ds_cfg['agent_command'] = agent_command
- if apply_network is not None:
- dsrc.ds_cfg['apply_network_config'] = apply_network
-
- return dsrc
-
- def _get_and_setup(self, dsrc):
- ret = dsrc.get_data()
- if ret:
- dsrc.setup(True)
- return ret
-
- def xml_equals(self, oxml, nxml):
- """Compare two sets of XML to make sure they are equal"""
-
- def create_tag_index(xml):
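- # Index elements by tag name (later duplicates overwrite earlier entries)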
- et = ET.fromstring(xml)
- ret = {}
- for x in et.iter():
- ret[x.tag] = x
- return ret
-
- def tags_exists(x, y):
- for tag in x.keys():
- assert tag in y
- for tag in y.keys():
- assert tag in x
-
- def tags_equal(x, y):
- for x_val in x.values():
- y_val = y.get(x_val.tag)
- assert x_val.text == y_val.text
-
- old_cnt = create_tag_index(oxml)
- new_cnt = create_tag_index(nxml)
- tags_exists(old_cnt, new_cnt)
- tags_equal(old_cnt, new_cnt)
-
- def xml_notequals(self, oxml, nxml):
- try:
- self.xml_equals(oxml, nxml)
- except AssertionError:
- return
- raise AssertionError("XML is the same")
-
- def test_get_resource_disk(self):
- ds = self._get_mockds()
- dev = ds.get_resource_disk_on_freebsd(1)
- self.assertEqual("da1", dev)
-
- def test_not_is_platform_viable_seed_should_return_no_datasource(self):
- """get_data returns no datasource when _is_platform_viable is False."""
- # Simulate a non-matching asset tag via _is_platform_viable being False
- data = {}
- dsrc = self._get_ds(data)
- self.m_is_platform_viable.return_value = False
- with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \
- mock.patch.object(dsrc, '_report_failure') as m_report_failure:
- ret = dsrc.get_data()
- self.m_is_platform_viable.assert_called_with(dsrc.seed_dir)
- self.assertFalse(ret)
- self.assertNotIn('agent_invoked', data)
- # Assert that for non viable platforms,
- # there is no communication with the Azure datasource.
- self.assertEqual(
- 0,
- m_crawl_metadata.call_count)
- self.assertEqual(
- 0,
- m_report_failure.call_count)
-
- def test_platform_viable_but_no_devs_should_return_no_datasource(self):
- """When the platform is viable (matching asset tag) but no candidate
- devices for crawling the Azure datasource are present, that is an
- unexpected fatal error, so failure is reported to Azure.
- """
- data = {}
- dsrc = self._get_ds(data)
- with mock.patch.object(dsrc, '_report_failure') as m_report_failure:
- self.m_is_platform_viable.return_value = True
- ret = dsrc.get_data()
- self.m_is_platform_viable.assert_called_with(dsrc.seed_dir)
- self.assertFalse(ret)
- self.assertNotIn('agent_invoked', data)
- self.assertEqual(
- 1,
- m_report_failure.call_count)
-
- def test_crawl_metadata_exception_returns_no_datasource(self):
- data = {}
- dsrc = self._get_ds(data)
- self.m_is_platform_viable.return_value = True
- with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata:
- m_crawl_metadata.side_effect = Exception
- ret = dsrc.get_data()
- self.m_is_platform_viable.assert_called_with(dsrc.seed_dir)
- self.assertEqual(
- 1,
- m_crawl_metadata.call_count)
- self.assertFalse(ret)
- self.assertNotIn('agent_invoked', data)
-
- def test_crawl_metadata_exception_should_report_failure_with_msg(self):
- data = {}
- dsrc = self._get_ds(data)
- self.m_is_platform_viable.return_value = True
- with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \
- mock.patch.object(dsrc, '_report_failure') as m_report_failure:
- m_crawl_metadata.side_effect = Exception
- dsrc.get_data()
- self.assertEqual(
- 1,
- m_crawl_metadata.call_count)
- m_report_failure.assert_called_once_with(
- description=dsaz.DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE)
-
- def test_crawl_metadata_exc_should_log_could_not_crawl_msg(self):
- data = {}
- dsrc = self._get_ds(data)
- self.m_is_platform_viable.return_value = True
- with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata:
- m_crawl_metadata.side_effect = Exception
- dsrc.get_data()
- self.assertEqual(
- 1,
- m_crawl_metadata.call_count)
- self.assertIn(
- "Could not crawl Azure metadata",
- self.logs.getvalue())
-
- def test_basic_seed_dir(self):
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': {}}
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertEqual(dsrc.userdata_raw, "")
- self.assertEqual(dsrc.metadata['local-hostname'], odata['HostName'])
- self.assertTrue(os.path.isfile(
- os.path.join(self.waagent_d, 'ovf-env.xml')))
- self.assertEqual('azure', dsrc.cloud_name)
- self.assertEqual('azure', dsrc.platform_type)
- self.assertEqual(
- 'seed-dir (%s/seed/azure)' % self.tmp, dsrc.subplatform)
-
- def test_basic_dev_file(self):
- """When a device path is used, present that in subplatform."""
- data = {'sys_cfg': {}, 'dsdevs': ['/dev/cd0']}
- dsrc = self._get_ds(data)
- with mock.patch(MOCKPATH + 'util.mount_cb') as m_mount_cb:
- m_mount_cb.return_value = (
- {'local-hostname': 'me'}, 'ud', {'cfg': ''}, {})
- self.assertTrue(dsrc.get_data())
- self.assertEqual(dsrc.userdata_raw, 'ud')
- self.assertEqual(dsrc.metadata['local-hostname'], 'me')
- self.assertEqual('azure', dsrc.cloud_name)
- self.assertEqual('azure', dsrc.platform_type)
- self.assertEqual('config-disk (/dev/cd0)', dsrc.subplatform)
-
- def test_get_data_non_ubuntu_will_not_remove_network_scripts(self):
- """get_data on non-Ubuntu will not remove ubuntu net scripts."""
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': {}}
-
- dsrc = self._get_ds(data, distro='debian')
- dsrc.get_data()
- self.m_remove_ubuntu_network_scripts.assert_not_called()
-
- def test_get_data_on_ubuntu_will_remove_network_scripts(self):
- """get_data will remove ubuntu net scripts on Ubuntu distro."""
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg}
-
- dsrc = self._get_ds(data, distro='ubuntu')
- dsrc.get_data()
- self.m_remove_ubuntu_network_scripts.assert_called_once_with()
-
- def test_get_data_on_ubuntu_will_not_remove_network_scripts_disabled(self):
- """When apply_network_config false, do not remove scripts on Ubuntu."""
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': False}}}
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg}
-
- dsrc = self._get_ds(data, distro='ubuntu')
- dsrc.get_data()
- self.m_remove_ubuntu_network_scripts.assert_not_called()
-
- def test_crawl_metadata_returns_structured_data_and_caches_nothing(self):
- """Return all structured metadata and cache no class attributes."""
- yaml_cfg = "{agent_command: my_command}\n"
- odata = {'HostName': "myhost", 'UserName': "myuser",
- 'UserData': {'text': 'FOOBAR', 'encoding': 'plain'},
- 'dscfg': {'text': yaml_cfg, 'encoding': 'plain'}}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': {}}
- dsrc = self._get_ds(data)
- expected_cfg = {
- 'PreprovisionedVMType': None,
- 'PreprovisionedVm': False,
- 'datasource': {'Azure': {'agent_command': 'my_command'}},
- 'system_info': {'default_user': {'name': u'myuser'}}}
- expected_metadata = {
- 'azure_data': {
- 'configurationsettype': 'LinuxProvisioningConfiguration'},
- 'imds': NETWORK_METADATA,
- 'instance-id': 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8',
- 'local-hostname': u'myhost',
- 'random_seed': 'wild'}
-
- crawled_metadata = dsrc.crawl_metadata()
-
- self.assertCountEqual(
- crawled_metadata.keys(),
- ['cfg', 'files', 'metadata', 'userdata_raw'])
- self.assertEqual(crawled_metadata['cfg'], expected_cfg)
- self.assertEqual(
- list(crawled_metadata['files'].keys()), ['ovf-env.xml'])
- self.assertIn(
- b'<HostName>myhost</HostName>',
- crawled_metadata['files']['ovf-env.xml'])
- self.assertEqual(crawled_metadata['metadata'], expected_metadata)
- self.assertEqual(crawled_metadata['userdata_raw'], 'FOOBAR')
- self.assertEqual(dsrc.userdata_raw, None)
- self.assertEqual(dsrc.metadata, {})
- self.assertEqual(dsrc._metadata_imds, UNSET)
- self.assertFalse(os.path.isfile(
- os.path.join(self.waagent_d, 'ovf-env.xml')))
-
- def test_crawl_metadata_raises_invalid_metadata_on_error(self):
- """crawl_metadata raises an exception on invalid ovf-env.xml."""
- data = {'ovfcontent': "BOGUS", 'sys_cfg': {}}
- dsrc = self._get_ds(data)
- error_msg = ('BrokenAzureDataSource: Invalid ovf-env.xml:'
- ' syntax error: line 1, column 0')
- with self.assertRaises(InvalidMetaDataException) as cm:
- dsrc.crawl_metadata()
- self.assertEqual(str(cm.exception), error_msg)
-
- @mock.patch(
- 'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting')
- @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
- @mock.patch(
- 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready')
- @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds')
- def test_crawl_metadata_on_reprovision_reports_ready(
- self, poll_imds_func, m_report_ready, m_write, m_dhcp
- ):
- """If reprovisioning, report ready at the end"""
- ovfenv = construct_valid_ovf_env(
- platform_settings={"PreprovisionedVm": "True"}
- )
-
- data = {
- 'ovfcontent': ovfenv,
- 'sys_cfg': {}
- }
- dsrc = self._get_ds(data)
- poll_imds_func.return_value = ovfenv
- dsrc.crawl_metadata()
- self.assertEqual(1, m_report_ready.call_count)
-
- @mock.patch(
- 'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting')
- @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
- @mock.patch(
- 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready')
- @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds')
- @mock.patch(
- 'cloudinit.sources.DataSourceAzure.DataSourceAzure.'
- '_wait_for_all_nics_ready')
- def test_crawl_metadata_waits_for_nic_on_savable_vms(
- self, detect_nics, poll_imds_func, report_ready_func, m_write, m_dhcp
- ):
- """If reprovisioning a Savable VM, wait for nic attach, then report ready"""
- ovfenv = construct_valid_ovf_env(
- platform_settings={"PreprovisionedVMType": "Savable",
- "PreprovisionedVm": "True"}
- )
-
- data = {
- 'ovfcontent': ovfenv,
- 'sys_cfg': {}
- }
- dsrc = self._get_ds(data)
- poll_imds_func.return_value = ovfenv
- dsrc.crawl_metadata()
- self.assertEqual(1, report_ready_func.call_count)
- self.assertEqual(1, detect_nics.call_count)
-
- @mock.patch(
- 'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting')
- @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
- @mock.patch(
- 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready')
- @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds')
- @mock.patch(
- 'cloudinit.sources.DataSourceAzure.DataSourceAzure.'
- '_wait_for_all_nics_ready')
- @mock.patch('os.path.isfile')
- def test_detect_nics_when_marker_present(
- self, is_file, detect_nics, poll_imds_func, report_ready_func, m_write,
- m_dhcp):
- """If reprovisioning, wait for nic attach if marker present"""
-
- def is_file_ret(key):
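- # Pretend only the reprovision NIC-attach marker file exists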
- return key == dsaz.REPROVISION_NIC_ATTACH_MARKER_FILE
-
- is_file.side_effect = is_file_ret
- ovfenv = construct_valid_ovf_env()
-
- data = {
- 'ovfcontent': ovfenv,
- 'sys_cfg': {}
- }
-
- dsrc = self._get_ds(data)
- poll_imds_func.return_value = ovfenv
- dsrc.crawl_metadata()
- self.assertEqual(1, report_ready_func.call_count)
- self.assertEqual(1, detect_nics.call_count)
-
- @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
- @mock.patch('cloudinit.sources.helpers.netlink.'
- 'wait_for_media_disconnect_connect')
- @mock.patch(
- 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready')
- @mock.patch('cloudinit.sources.DataSourceAzure.readurl')
- def test_crawl_metadata_on_reprovision_reports_ready_using_lease(
- self, m_readurl, m_report_ready,
- m_media_switch, m_write
- ):
- """If reprovisioning, report ready using the obtained lease"""
- ovfenv = construct_valid_ovf_env(
- platform_settings={"PreprovisionedVm": "True"}
- )
-
- data = {
- 'ovfcontent': ovfenv,
- 'sys_cfg': {}
- }
- dsrc = self._get_ds(data)
-
- with mock.patch.object(dsrc.distro.networking, 'is_up') \
- as m_dsrc_distro_networking_is_up:
-
- # For this mock, net should not be up,
- # so that cached ephemeral won't be used.
- # This is so that a NEW ephemeral dhcp lease will be discovered
- # and used instead.
- m_dsrc_distro_networking_is_up.return_value = False
-
- lease = {
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'unknown-245': '624c3620'}
- self.m_ephemeral_dhcpv4_with_reporting.return_value \
- .__enter__.return_value = lease
- m_media_switch.return_value = None
-
- reprovision_ovfenv = construct_valid_ovf_env()
- m_readurl.return_value = url_helper.StringResponse(
- reprovision_ovfenv.encode('utf-8'))
-
- dsrc.crawl_metadata()
- self.assertEqual(2, m_report_ready.call_count)
- m_report_ready.assert_called_with(lease=lease)
-
- def test_waagent_d_has_0700_perms(self):
- # we expect /var/lib/waagent to be created 0700
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertTrue(os.path.isdir(self.waagent_d))
- self.assertEqual(stat.S_IMODE(os.stat(self.waagent_d).st_mode), 0o700)
-
- def test_user_cfg_set_agent_command_plain(self):
- # set dscfg via plaintext
- # yaml_cfg must be XML-friendly plaintext;
- # not all plaintext is expected to work.
- yaml_cfg = "{agent_command: my_command}\n"
- cfg = yaml.safe_load(yaml_cfg)
- odata = {'HostName': "myhost", 'UserName': "myuser",
- 'dscfg': {'text': yaml_cfg, 'encoding': 'plain'}}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
-
- dsrc = self._get_ds(data)
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
- self.assertEqual(data['agent_invoked'], cfg['agent_command'])
-
- @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
- return_value=None)
- def test_network_config_set_from_imds(self, m_driver):
- """Datasource.network_config returns IMDS network data."""
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
- odata = {}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg}
- expected_network_config = {
- 'ethernets': {
- 'eth0': {'set-name': 'eth0',
- 'match': {'macaddress': '00:0d:3a:04:75:98'},
- 'dhcp6': False,
- 'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 100}}},
- 'version': 2}
- dsrc = self._get_ds(data)
- dsrc.get_data()
- self.assertEqual(expected_network_config, dsrc.network_config)
-
- @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
- return_value=None)
- def test_network_config_set_from_imds_route_metric_for_secondary_nic(
- self, m_driver):
- """Datasource.network_config adds route-metric to secondary nics."""
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
- odata = {}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg}
- expected_network_config = {
- 'ethernets': {
- 'eth0': {'set-name': 'eth0',
- 'match': {'macaddress': '00:0d:3a:04:75:98'},
- 'dhcp6': False,
- 'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 100}},
- 'eth1': {'set-name': 'eth1',
- 'match': {'macaddress': '22:0d:3a:04:75:98'},
- 'dhcp6': False,
- 'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 200}},
- 'eth2': {'set-name': 'eth2',
- 'match': {'macaddress': '33:0d:3a:04:75:98'},
- 'dhcp6': False,
- 'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 300}}},
- 'version': 2}
- imds_data = copy.deepcopy(NETWORK_METADATA)
- imds_data['network']['interface'].append(SECONDARY_INTERFACE)
- third_intf = copy.deepcopy(SECONDARY_INTERFACE)
- third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33')
- third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0'
- third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6'
- imds_data['network']['interface'].append(third_intf)
-
- self.m_get_metadata_from_imds.return_value = imds_data
- dsrc = self._get_ds(data)
- dsrc.get_data()
- self.assertEqual(expected_network_config, dsrc.network_config)
-
- def test_availability_zone_set_from_imds(self):
- """Datasource.availability returns IMDS platformFaultDomain."""
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
- odata = {}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg}
- dsrc = self._get_ds(data)
- dsrc.get_data()
- self.assertEqual('0', dsrc.availability_zone)
-
- def test_region_set_from_imds(self):
- """Datasource.region returns IMDS region location."""
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
- odata = {}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg}
- dsrc = self._get_ds(data)
- dsrc.get_data()
- self.assertEqual('eastus2', dsrc.region)
-
- def test_user_cfg_set_agent_command(self):
- # set dscfg via base64-encoded yaml
- cfg = {'agent_command': "my_command"}
- odata = {'HostName': "myhost", 'UserName': "myuser",
- 'dscfg': {'text': b64e(yaml.dump(cfg)),
- 'encoding': 'base64'}}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
-
- dsrc = self._get_ds(data)
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
- self.assertEqual(data['agent_invoked'], cfg['agent_command'])
-
- def test_sys_cfg_set_agent_command(self):
- sys_cfg = {'datasource': {'Azure': {'agent_command': '_COMMAND'}}}
- data = {'ovfcontent': construct_valid_ovf_env(data={}),
- 'sys_cfg': sys_cfg}
-
- dsrc = self._get_ds(data)
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
- self.assertEqual(data['agent_invoked'], '_COMMAND')
-
- def test_sys_cfg_set_never_destroy_ntfs(self):
- sys_cfg = {'datasource': {'Azure': {
- 'never_destroy_ntfs': 'user-supplied-value'}}}
- data = {'ovfcontent': construct_valid_ovf_env(data={}),
- 'sys_cfg': sys_cfg}
-
- dsrc = self._get_ds(data)
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
- self.assertEqual(dsrc.ds_cfg.get(dsaz.DS_CFG_KEY_PRESERVE_NTFS),
- 'user-supplied-value')
-
- def test_username_used(self):
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
-
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertEqual(dsrc.cfg['system_info']['default_user']['name'],
- "myuser")
-
- def test_password_given(self):
- odata = {'HostName': "myhost", 'UserName': "myuser",
- 'UserPassword': "mypass"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
-
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertIn('default_user', dsrc.cfg['system_info'])
- defuser = dsrc.cfg['system_info']['default_user']
-
- # the default user should be updated to the provided username and not locked.
- self.assertEqual(defuser['name'], odata['UserName'])
- self.assertFalse(defuser['lock_passwd'])
- # passwd is a crypt-formatted string: $id$salt$encrypted
- # re-encrypting the plaintext with everything up to the final '$' as
- # the salt should reproduce the stored value
- pos = defuser['passwd'].rfind("$") + 1
- self.assertEqual(defuser['passwd'],
- crypt.crypt(odata['UserPassword'],
- defuser['passwd'][0:pos]))
-
- # the same hashed value should also be present in cfg['password']
- self.assertEqual(defuser['passwd'], dsrc.cfg['password'])
-
- def test_user_not_locked_if_password_redacted(self):
- odata = {'HostName': "myhost", 'UserName': "myuser",
- 'UserPassword': dsaz.DEF_PASSWD_REDACTION}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
-
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertIn('default_user', dsrc.cfg['system_info'])
- defuser = dsrc.cfg['system_info']['default_user']
-
- # the default user should be updated to the provided username and not locked.
- self.assertEqual(defuser['name'], odata['UserName'])
- self.assertIn('lock_passwd', defuser)
- self.assertFalse(defuser['lock_passwd'])
-
- def test_userdata_plain(self):
- mydata = "FOOBAR"
- odata = {'UserData': {'text': mydata, 'encoding': 'plain'}}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
-
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertEqual(decode_binary(dsrc.userdata_raw), mydata)
-
- def test_userdata_found(self):
- mydata = "FOOBAR"
- odata = {'UserData': {'text': b64e(mydata), 'encoding': 'base64'}}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
-
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertEqual(dsrc.userdata_raw, mydata.encode('utf-8'))
-
- def test_cfg_has_pubkeys_fingerprint(self):
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}]
- pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist]
- data = {'ovfcontent': construct_valid_ovf_env(data=odata,
- pubkeys=pubkeys)}
-
- dsrc = self._get_ds(data, agent_command=['not', '__builtin__'])
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
- for mypk in mypklist:
- self.assertIn(mypk, dsrc.cfg['_pubkeys'])
- self.assertIn('pubkey_from', dsrc.metadata['public-keys'][-1])
-
- def test_cfg_has_pubkeys_value(self):
- # make sure that provided key is used over fingerprint
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': 'value1'}]
- pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist]
- data = {'ovfcontent': construct_valid_ovf_env(data=odata,
- pubkeys=pubkeys)}
-
- dsrc = self._get_ds(data, agent_command=['not', '__builtin__'])
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
-
- for mypk in mypklist:
- self.assertIn(mypk, dsrc.cfg['_pubkeys'])
- self.assertIn(mypk['value'], dsrc.metadata['public-keys'])
-
- def test_cfg_has_no_fingerprint_has_value(self):
- # test value is used when fingerprint not provided
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- mypklist = [{'fingerprint': None, 'path': 'path1', 'value': 'value1'}]
- pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist]
- data = {'ovfcontent': construct_valid_ovf_env(data=odata,
- pubkeys=pubkeys)}
-
- dsrc = self._get_ds(data, agent_command=['not', '__builtin__'])
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
-
- for mypk in mypklist:
- self.assertIn(mypk['value'], dsrc.metadata['public-keys'])
-
- def test_default_ephemeral(self):
- # make sure the ephemeral device works
- odata = {}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': {}}
-
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- cfg = dsrc.get_config_obj()
-
- self.assertEqual(dsrc.device_name_to_device("ephemeral0"),
- dsaz.RESOURCE_DISK_PATH)
- assert 'disk_setup' in cfg
- assert 'fs_setup' in cfg
- self.assertIsInstance(cfg['disk_setup'], dict)
- self.assertIsInstance(cfg['fs_setup'], list)
-
- def test_provide_disk_aliases(self):
- # Make sure that user can affect disk aliases
- dscfg = {'disk_aliases': {'ephemeral0': '/dev/sdc'}}
- odata = {'HostName': "myhost", 'UserName': "myuser",
- 'dscfg': {'text': b64e(yaml.dump(dscfg)),
- 'encoding': 'base64'}}
- usercfg = {'disk_setup': {'/dev/sdc': {'something': '...'},
- 'ephemeral0': False}}
- userdata = '#cloud-config' + yaml.dump(usercfg) + "\n"
-
- ovfcontent = construct_valid_ovf_env(data=odata, userdata=userdata)
- data = {'ovfcontent': ovfcontent, 'sys_cfg': {}}
-
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- cfg = dsrc.get_config_obj()
- self.assertTrue(cfg)
-
- def test_userdata_arrives(self):
- userdata = "This is my user-data"
- xml = construct_valid_ovf_env(data={}, userdata=userdata)
- data = {'ovfcontent': xml}
- dsrc = self._get_ds(data)
- dsrc.get_data()
-
- self.assertEqual(userdata.encode('us-ascii'), dsrc.userdata_raw)
-
- def test_password_redacted_in_ovf(self):
- odata = {'HostName': "myhost", 'UserName': "myuser",
- 'UserPassword': "mypass"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
-
- self.assertTrue(ret)
- ovf_env_path = os.path.join(self.waagent_d, 'ovf-env.xml')
-
- # The XML should not be the same since the user password is redacted
- on_disk_ovf = load_file(ovf_env_path)
- self.xml_notequals(data['ovfcontent'], on_disk_ovf)
-
- # Make sure that the redacted password on disk is not used by CI
- self.assertNotEqual(dsrc.cfg.get('password'),
- dsaz.DEF_PASSWD_REDACTION)
-
- # Make sure that the password was really encrypted
- et = ET.fromstring(on_disk_ovf)
- for elem in et.iter():
- if 'UserPassword' in elem.tag:
- self.assertEqual(dsaz.DEF_PASSWD_REDACTION, elem.text)
-
- def test_ovf_env_arrives_in_waagent_dir(self):
- xml = construct_valid_ovf_env(data={}, userdata="FOODATA")
- dsrc = self._get_ds({'ovfcontent': xml})
- dsrc.get_data()
-
- # 'data_dir' is '/var/lib/waagent' (walinux-agent's state dir)
- # we expect that the ovf-env.xml file is copied there.
- ovf_env_path = os.path.join(self.waagent_d, 'ovf-env.xml')
- self.assertTrue(os.path.exists(ovf_env_path))
- self.xml_equals(xml, load_file(ovf_env_path))
-
- def test_ovf_can_include_unicode(self):
- xml = construct_valid_ovf_env(data={})
- xml = u'\ufeff{0}'.format(xml)
- dsrc = self._get_ds({'ovfcontent': xml})
- dsrc.get_data()
-
- def test_dsaz_report_ready_returns_true_when_report_succeeds(
- self):
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- dsrc.ds_cfg['agent_command'] = '__builtin__'
- self.assertTrue(dsrc._report_ready(lease=mock.MagicMock()))
-
- def test_dsaz_report_ready_returns_false_and_does_not_propagate_exc(
- self):
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- dsrc.ds_cfg['agent_command'] = '__builtin__'
- self.m_get_metadata_from_fabric.side_effect = Exception
- self.assertFalse(dsrc._report_ready(lease=mock.MagicMock()))
-
- def test_dsaz_report_failure_returns_true_when_report_succeeds(self):
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- dsrc.ds_cfg['agent_command'] = '__builtin__'
-
- with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata:
- # mock crawl metadata failure to cause report failure
- m_crawl_metadata.side_effect = Exception
-
- self.assertTrue(dsrc._report_failure())
- self.assertEqual(
- 1,
- self.m_report_failure_to_fabric.call_count)
-
- def test_dsaz_report_failure_returns_false_and_does_not_propagate_exc(
- self):
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- dsrc.ds_cfg['agent_command'] = '__builtin__'
-
- with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \
- mock.patch.object(dsrc, '_ephemeral_dhcp_ctx') \
- as m_ephemeral_dhcp_ctx, \
- mock.patch.object(dsrc.distro.networking, 'is_up') \
- as m_dsrc_distro_networking_is_up:
- # mock crawl metadata failure to cause report failure
- m_crawl_metadata.side_effect = Exception
-
- # setup mocks to allow using cached ephemeral dhcp lease
- m_dsrc_distro_networking_is_up.return_value = True
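- # 'unknown-245' is DHCP option 245, which carries the Azure
- # wireserver (fabric) address used when reporting failure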
- test_lease_dhcp_option_245 = 'test_lease_dhcp_option_245'
- test_lease = {'unknown-245': test_lease_dhcp_option_245}
- m_ephemeral_dhcp_ctx.lease = test_lease
-
- # We expect 3 calls to report_failure_to_fabric,
- # because we try 3 different methods of calling report failure.
- # The different methods are attempted in the following order:
- # 1. Using cached ephemeral dhcp context to report failure to Azure
- # 2. Using new ephemeral dhcp to report failure to Azure
- # 3. Using fallback lease to report failure to Azure
- self.m_report_failure_to_fabric.side_effect = Exception
- self.assertFalse(dsrc._report_failure())
- self.assertEqual(
- 3,
- self.m_report_failure_to_fabric.call_count)
-
- def test_dsaz_report_failure_description_msg(self):
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- dsrc.ds_cfg['agent_command'] = '__builtin__'
-
- with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata:
- # mock crawl metadata failure to cause report failure
- m_crawl_metadata.side_effect = Exception
-
- test_msg = 'Test report failure description message'
- self.assertTrue(dsrc._report_failure(description=test_msg))
- self.m_report_failure_to_fabric.assert_called_once_with(
- dhcp_opts=mock.ANY, description=test_msg)
-
- def test_dsaz_report_failure_no_description_msg(self):
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- dsrc.ds_cfg['agent_command'] = '__builtin__'
-
- with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata:
- m_crawl_metadata.side_effect = Exception
-
- self.assertTrue(dsrc._report_failure()) # no description msg
- self.m_report_failure_to_fabric.assert_called_once_with(
- dhcp_opts=mock.ANY, description=None)
-
- def test_dsaz_report_failure_uses_cached_ephemeral_dhcp_ctx_lease(self):
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- dsrc.ds_cfg['agent_command'] = '__builtin__'
-
- with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \
- mock.patch.object(dsrc, '_ephemeral_dhcp_ctx') \
- as m_ephemeral_dhcp_ctx, \
- mock.patch.object(dsrc.distro.networking, 'is_up') \
- as m_dsrc_distro_networking_is_up:
- # mock crawl metadata failure to cause report failure
- m_crawl_metadata.side_effect = Exception
-
- # setup mocks to allow using cached ephemeral dhcp lease
- m_dsrc_distro_networking_is_up.return_value = True
- test_lease_dhcp_option_245 = 'test_lease_dhcp_option_245'
- test_lease = {'unknown-245': test_lease_dhcp_option_245}
- m_ephemeral_dhcp_ctx.lease = test_lease
-
- self.assertTrue(dsrc._report_failure())
-
- # ensure called with cached ephemeral dhcp lease option 245
- self.m_report_failure_to_fabric.assert_called_once_with(
- description=mock.ANY, dhcp_opts=test_lease_dhcp_option_245)
-
- # ensure cached ephemeral is cleaned
- self.assertEqual(
- 1,
- m_ephemeral_dhcp_ctx.clean_network.call_count)
-
- def test_dsaz_report_failure_no_net_uses_new_ephemeral_dhcp_lease(self):
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- dsrc.ds_cfg['agent_command'] = '__builtin__'
-
- with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \
- mock.patch.object(dsrc.distro.networking, 'is_up') \
- as m_dsrc_distro_networking_is_up:
- # mock crawl metadata failure to cause report failure
- m_crawl_metadata.side_effect = Exception
-
- # net is not up and cannot use cached ephemeral dhcp
- m_dsrc_distro_networking_is_up.return_value = False
- # setup ephemeral dhcp lease discovery mock
- test_lease_dhcp_option_245 = 'test_lease_dhcp_option_245'
- test_lease = {'unknown-245': test_lease_dhcp_option_245}
- self.m_ephemeral_dhcpv4_with_reporting.return_value \
- .__enter__.return_value = test_lease
-
- self.assertTrue(dsrc._report_failure())
-
- # ensure called with the newly discovered
- # ephemeral dhcp lease option 245
- self.m_report_failure_to_fabric.assert_called_once_with(
- description=mock.ANY, dhcp_opts=test_lease_dhcp_option_245)
-
- def test_dsaz_report_failure_no_net_and_no_dhcp_uses_fallback_lease(
- self):
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- dsrc.ds_cfg['agent_command'] = '__builtin__'
-
- with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \
- mock.patch.object(dsrc.distro.networking, 'is_up') \
- as m_dsrc_distro_networking_is_up:
- # mock crawl metadata failure to cause report failure
- m_crawl_metadata.side_effect = Exception
-
- # net is not up and cannot use cached ephemeral dhcp
- m_dsrc_distro_networking_is_up.return_value = False
- # ephemeral dhcp discovery failure,
- # so cannot use a new ephemeral dhcp
- self.m_ephemeral_dhcpv4_with_reporting.return_value \
- .__enter__.side_effect = Exception
-
- self.assertTrue(dsrc._report_failure())
-
- # ensure called with fallback lease
- self.m_report_failure_to_fabric.assert_called_once_with(
- description=mock.ANY,
- fallback_lease_file=dsrc.dhclient_lease_file)
-
- def test_exception_fetching_fabric_data_doesnt_propagate(self):
- """Errors communicating with fabric should warn, but return True."""
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- dsrc.ds_cfg['agent_command'] = '__builtin__'
- self.m_get_metadata_from_fabric.side_effect = Exception
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
-
- def test_fabric_data_included_in_metadata(self):
- dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- dsrc.ds_cfg['agent_command'] = '__builtin__'
- self.m_get_metadata_from_fabric.return_value = {'test': 'value'}
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
- self.assertEqual('value', dsrc.metadata['test'])
-
- def test_instance_id_endianness(self):
- """Return the previous iid when dmi uuid is the byteswapped iid."""
- ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- # byte-swapped previous
- write_file(
- os.path.join(self.paths.cloud_dir, 'data', 'instance-id'),
- '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8')
- ds.get_data()
- self.assertEqual(
- '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8', ds.metadata['instance-id'])
- # not byte-swapped previous
- write_file(
- os.path.join(self.paths.cloud_dir, 'data', 'instance-id'),
- '644CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8')
- ds.get_data()
- self.assertEqual(
- 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8', ds.metadata['instance-id'])
-
- def test_instance_id_from_dmidecode_used(self):
- ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- ds.get_data()
- self.assertEqual(self.instance_id, ds.metadata['instance-id'])
-
- def test_instance_id_from_dmidecode_used_for_builtin(self):
- ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
- ds.ds_cfg['agent_command'] = '__builtin__'
- ds.get_data()
- self.assertEqual(self.instance_id, ds.metadata['instance-id'])
-
- @mock.patch(MOCKPATH + 'util.is_FreeBSD')
- @mock.patch(MOCKPATH + '_check_freebsd_cdrom')
- def test_list_possible_azure_ds_devs(self, m_check_fbsd_cdrom,
- m_is_FreeBSD):
- """On FreeBSD, possible devs should show /dev/cd0."""
- m_is_FreeBSD.return_value = True
- m_check_fbsd_cdrom.return_value = True
- self.assertEqual(dsaz.list_possible_azure_ds_devs(), ['/dev/cd0'])
- self.assertEqual(
- [mock.call("/dev/cd0")], m_check_fbsd_cdrom.call_args_list)
-
- @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
- return_value=None)
- @mock.patch('cloudinit.net.generate_fallback_config')
- def test_imds_network_config(self, mock_fallback, m_driver):
- """Network config is generated from IMDS network data when present."""
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg}
-
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertTrue(ret)
-
- expected_cfg = {
- 'ethernets': {
- 'eth0': {'dhcp4': True,
- 'dhcp4-overrides': {'route-metric': 100},
- 'dhcp6': False,
- 'match': {'macaddress': '00:0d:3a:04:75:98'},
- 'set-name': 'eth0'}},
- 'version': 2}
-
- self.assertEqual(expected_cfg, dsrc.network_config)
- mock_fallback.assert_not_called()
-
- @mock.patch('cloudinit.net.get_interface_mac')
- @mock.patch('cloudinit.net.get_devicelist')
- @mock.patch('cloudinit.net.device_driver')
- @mock.patch('cloudinit.net.generate_fallback_config')
- def test_imds_network_ignored_when_apply_network_config_false(
- self, mock_fallback, mock_dd, mock_devlist, mock_get_mac):
- """When apply_network_config is False, use fallback instead of IMDS."""
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': False}}}
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg}
- fallback_config = {
- 'version': 1,
- 'config': [{
- 'type': 'physical', 'name': 'eth0',
- 'mac_address': '00:11:22:33:44:55',
- 'params': {'driver': 'hv_netsvc'},
- 'subnets': [{'type': 'dhcp'}],
- }]
- }
- mock_fallback.return_value = fallback_config
-
- mock_devlist.return_value = ['eth0']
- mock_dd.return_value = ['hv_netsvc']
- mock_get_mac.return_value = '00:11:22:33:44:55'
-
- dsrc = self._get_ds(data)
- self.assertTrue(dsrc.get_data())
- self.assertEqual(dsrc.network_config, fallback_config)
-
- @mock.patch('cloudinit.net.get_interface_mac')
- @mock.patch('cloudinit.net.get_devicelist')
- @mock.patch('cloudinit.net.device_driver')
- @mock.patch('cloudinit.net.generate_fallback_config', autospec=True)
- def test_fallback_network_config(self, mock_fallback, mock_dd,
- mock_devlist, mock_get_mac):
- """On absent IMDS network data, generate network fallback config."""
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': {}}
-
- fallback_config = {
- 'version': 1,
- 'config': [{
- 'type': 'physical', 'name': 'eth0',
- 'mac_address': '00:11:22:33:44:55',
- 'params': {'driver': 'hv_netsvc'},
- 'subnets': [{'type': 'dhcp'}],
- }]
- }
- mock_fallback.return_value = fallback_config
-
- mock_devlist.return_value = ['eth0']
- mock_dd.return_value = ['hv_netsvc']
- mock_get_mac.return_value = '00:11:22:33:44:55'
-
- dsrc = self._get_ds(data)
- # Represent empty response from network imds
- self.m_get_metadata_from_imds.return_value = {}
- ret = dsrc.get_data()
- self.assertTrue(ret)
-
- netconfig = dsrc.network_config
- self.assertEqual(netconfig, fallback_config)
- mock_fallback.assert_called_with(
- blacklist_drivers=['mlx4_core', 'mlx5_core'],
- config_driver=True)
-
- @mock.patch(MOCKPATH + 'net.get_interfaces', autospec=True)
- @mock.patch(MOCKPATH + 'util.is_FreeBSD')
- def test_blacklist_through_distro(
- self, m_is_freebsd, m_net_get_interfaces):
- """Verify Azure DS updates blacklist drivers in the distro's
- networking object."""
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- data = {'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': {}}
-
- distro_cls = distros.fetch('ubuntu')
- distro = distro_cls('ubuntu', {}, self.paths)
- dsrc = self._get_ds(data, distro=distro)
- dsrc.get_data()
- self.assertEqual(distro.networking.blacklist_drivers,
- dsaz.BLACKLIST_DRIVERS)
-
- m_is_freebsd.return_value = False
- distro.networking.get_interfaces_by_mac()
- m_net_get_interfaces.assert_called_with(
- blacklist_drivers=dsaz.BLACKLIST_DRIVERS)
-
- @mock.patch(MOCKPATH + 'subp.subp', autospec=True)
- def test_get_hostname_with_no_args(self, m_subp):
- dsaz.get_hostname()
- m_subp.assert_called_once_with(("hostname",), capture=True)
-
- @mock.patch(MOCKPATH + 'subp.subp', autospec=True)
- def test_get_hostname_with_string_arg(self, m_subp):
- dsaz.get_hostname(hostname_command="hostname")
- m_subp.assert_called_once_with(("hostname",), capture=True)
-
- @mock.patch(MOCKPATH + 'subp.subp', autospec=True)
- def test_get_hostname_with_iterable_arg(self, m_subp):
- dsaz.get_hostname(hostname_command=("hostname",))
- m_subp.assert_called_once_with(("hostname",), capture=True)
-
- @mock.patch(
- 'cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates')
- def test_get_public_ssh_keys_with_imds(self, m_parse_certificates):
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- data = {
- 'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg
- }
- dsrc = self._get_ds(data)
- dsrc.get_data()
- dsrc.setup(True)
- ssh_keys = dsrc.get_public_ssh_keys()
- self.assertEqual(ssh_keys, ['key1'])
- self.assertEqual(m_parse_certificates.call_count, 0)
-
- @mock.patch(MOCKPATH + 'get_metadata_from_imds')
- def test_get_public_ssh_keys_without_imds(
- self,
- m_get_metadata_from_imds):
- m_get_metadata_from_imds.return_value = dict()
- sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
- odata = {'HostName': "myhost", 'UserName': "myuser"}
- data = {
- 'ovfcontent': construct_valid_ovf_env(data=odata),
- 'sys_cfg': sys_cfg
- }
- dsrc = self._get_ds(data)
- dsaz.get_metadata_from_fabric.return_value = {'public-keys': ['key2']}
- dsrc.get_data()
- dsrc.setup(True)
- ssh_keys = dsrc.get_public_ssh_keys()
- self.assertEqual(ssh_keys, ['key2'])
-
-
-class TestAzureBounce(CiTestCase):
-
- with_logs = True
-
- def mock_out_azure_moving_parts(self):
- self.patches.enter_context(
- mock.patch.object(dsaz, 'invoke_agent'))
- self.patches.enter_context(
- mock.patch.object(dsaz.util, 'wait_for_files'))
- self.patches.enter_context(
- mock.patch.object(dsaz, 'list_possible_azure_ds_devs',
- mock.MagicMock(return_value=[])))
- self.patches.enter_context(
- mock.patch.object(dsaz, 'get_metadata_from_fabric',
- mock.MagicMock(return_value={})))
- self.patches.enter_context(
- mock.patch.object(dsaz, 'get_metadata_from_imds',
- mock.MagicMock(return_value={})))
- self.patches.enter_context(
- mock.patch.object(dsaz.subp, 'which', lambda x: True))
- self.patches.enter_context(mock.patch.object(
- dsaz, '_get_random_seed', return_value='wild'))
-
- def _dmi_mocks(key):
- if key == 'system-uuid':
- return 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8'
- elif key == 'chassis-asset-tag':
- return '7783-7084-3265-9085-8269-3286-77'
- raise RuntimeError('should not get here')
-
- self.patches.enter_context(
- mock.patch.object(dsaz.dmi, 'read_dmi_data',
- mock.MagicMock(side_effect=_dmi_mocks)))
-
- def setUp(self):
- super(TestAzureBounce, self).setUp()
- self.tmp = self.tmp_dir()
- self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent')
- self.paths = helpers.Paths(
- {'cloud_dir': self.tmp, 'run_dir': self.tmp})
- dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
- self.patches = ExitStack()
- self.mock_out_azure_moving_parts()
- self.get_hostname = self.patches.enter_context(
- mock.patch.object(dsaz, 'get_hostname'))
- self.set_hostname = self.patches.enter_context(
- mock.patch.object(dsaz, 'set_hostname'))
- self.subp = self.patches.enter_context(
- mock.patch(MOCKPATH + 'subp.subp'))
- self.find_fallback_nic = self.patches.enter_context(
- mock.patch('cloudinit.net.find_fallback_nic', return_value='eth9'))
-
- def tearDown(self):
- self.patches.close()
- super(TestAzureBounce, self).tearDown()
-
- def _get_ds(self, ovfcontent=None, agent_command=None):
- if ovfcontent is not None:
- populate_dir(os.path.join(self.paths.seed_dir, "azure"),
- {'ovf-env.xml': ovfcontent})
- dsrc = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
- if agent_command is not None:
- dsrc.ds_cfg['agent_command'] = agent_command
- return dsrc
-
- def _get_and_setup(self, dsrc):
- ret = dsrc.get_data()
- if ret:
- dsrc.setup(True)
- return ret
-
- def get_ovf_env_with_dscfg(self, hostname, cfg):
- odata = {
- 'HostName': hostname,
- 'dscfg': {
- 'text': b64e(yaml.dump(cfg)),
- 'encoding': 'base64'
- }
- }
- return construct_valid_ovf_env(data=odata)
-
- def test_disabled_bounce_does_not_change_hostname(self):
- cfg = {'hostname_bounce': {'policy': 'off'}}
- ds = self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg))
- ds.get_data()
- self.assertEqual(0, self.set_hostname.call_count)
-
- @mock.patch(MOCKPATH + 'perform_hostname_bounce')
- def test_disabled_bounce_does_not_perform_bounce(
- self, perform_hostname_bounce):
- cfg = {'hostname_bounce': {'policy': 'off'}}
- ds = self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg))
- ds.get_data()
- self.assertEqual(0, perform_hostname_bounce.call_count)
-
- def test_same_hostname_does_not_change_hostname(self):
- host_name = 'unchanged-host-name'
- self.get_hostname.return_value = host_name
- cfg = {'hostname_bounce': {'policy': 'yes'}}
- ds = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg))
- ds.get_data()
- self.assertEqual(0, self.set_hostname.call_count)
-
- @mock.patch(MOCKPATH + 'perform_hostname_bounce')
- def test_unchanged_hostname_does_not_perform_bounce(
- self, perform_hostname_bounce):
- host_name = 'unchanged-host-name'
- self.get_hostname.return_value = host_name
- cfg = {'hostname_bounce': {'policy': 'yes'}}
- ds = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg))
- ds.get_data()
- self.assertEqual(0, perform_hostname_bounce.call_count)
-
- @mock.patch(MOCKPATH + 'perform_hostname_bounce')
- def test_force_performs_bounce_regardless(self, perform_hostname_bounce):
- host_name = 'unchanged-host-name'
- self.get_hostname.return_value = host_name
- cfg = {'hostname_bounce': {'policy': 'force'}}
- dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg),
- agent_command=['not', '__builtin__'])
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
- self.assertEqual(1, perform_hostname_bounce.call_count)
-
- def test_bounce_skipped_on_ifupdown_absent(self):
- host_name = 'unchanged-host-name'
- self.get_hostname.return_value = host_name
- cfg = {'hostname_bounce': {'policy': 'force'}}
- dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg),
- agent_command=['not', '__builtin__'])
- patch_path = MOCKPATH + 'subp.which'
- with mock.patch(patch_path) as m_which:
- m_which.return_value = None
- ret = self._get_and_setup(dsrc)
- self.assertEqual([mock.call('ifup')], m_which.call_args_list)
- self.assertTrue(ret)
- self.assertIn(
- "Skipping network bounce: ifupdown utils aren't present.",
- self.logs.getvalue())
-
- def test_different_hostnames_sets_hostname(self):
- expected_hostname = 'azure-expected-host-name'
- self.get_hostname.return_value = 'default-host-name'
- dsrc = self._get_ds(
- self.get_ovf_env_with_dscfg(expected_hostname, {}),
- agent_command=['not', '__builtin__'])
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
- self.assertEqual(expected_hostname,
- self.set_hostname.call_args_list[0][0][0])
-
- @mock.patch(MOCKPATH + 'perform_hostname_bounce')
- def test_different_hostnames_performs_bounce(
- self, perform_hostname_bounce):
- expected_hostname = 'azure-expected-host-name'
- self.get_hostname.return_value = 'default-host-name'
- dsrc = self._get_ds(
- self.get_ovf_env_with_dscfg(expected_hostname, {}),
- agent_command=['not', '__builtin__'])
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
- self.assertEqual(1, perform_hostname_bounce.call_count)
-
- def test_different_hostnames_sets_hostname_back(self):
- initial_host_name = 'default-host-name'
- self.get_hostname.return_value = initial_host_name
- dsrc = self._get_ds(
- self.get_ovf_env_with_dscfg('some-host-name', {}),
- agent_command=['not', '__builtin__'])
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
- self.assertEqual(initial_host_name,
- self.set_hostname.call_args_list[-1][0][0])
-
- @mock.patch(MOCKPATH + 'perform_hostname_bounce')
- def test_failure_in_bounce_still_resets_host_name(
- self, perform_hostname_bounce):
- perform_hostname_bounce.side_effect = Exception
- initial_host_name = 'default-host-name'
- self.get_hostname.return_value = initial_host_name
- dsrc = self._get_ds(
- self.get_ovf_env_with_dscfg('some-host-name', {}),
- agent_command=['not', '__builtin__'])
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
- self.assertEqual(initial_host_name,
- self.set_hostname.call_args_list[-1][0][0])
-
- @mock.patch.object(dsaz, 'get_boot_telemetry')
- def test_environment_correct_for_bounce_command(
- self, mock_get_boot_telemetry):
- interface = 'int0'
- hostname = 'my-new-host'
- old_hostname = 'my-old-host'
- self.get_hostname.return_value = old_hostname
- cfg = {'hostname_bounce': {'interface': interface, 'policy': 'force'}}
- data = self.get_ovf_env_with_dscfg(hostname, cfg)
- dsrc = self._get_ds(data, agent_command=['not', '__builtin__'])
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
- self.assertEqual(1, self.subp.call_count)
- bounce_env = self.subp.call_args[1]['env']
- self.assertEqual(interface, bounce_env['interface'])
- self.assertEqual(hostname, bounce_env['hostname'])
- self.assertEqual(old_hostname, bounce_env['old_hostname'])
-
- @mock.patch.object(dsaz, 'get_boot_telemetry')
- def test_default_bounce_command_ifup_used_by_default(
- self, mock_get_boot_telemetry):
- cfg = {'hostname_bounce': {'policy': 'force'}}
- data = self.get_ovf_env_with_dscfg('some-hostname', cfg)
- dsrc = self._get_ds(data, agent_command=['not', '__builtin__'])
- ret = self._get_and_setup(dsrc)
- self.assertTrue(ret)
- self.assertEqual(1, self.subp.call_count)
- bounce_args = self.subp.call_args[1]['args']
- self.assertEqual(
- dsaz.BOUNCE_COMMAND_IFUP, bounce_args)
-
- @mock.patch(MOCKPATH + 'perform_hostname_bounce')
- def test_set_hostname_option_can_disable_bounce(
- self, perform_hostname_bounce):
- cfg = {'set_hostname': False, 'hostname_bounce': {'policy': 'force'}}
- data = self.get_ovf_env_with_dscfg('some-hostname', cfg)
- self._get_ds(data).get_data()
-
- self.assertEqual(0, perform_hostname_bounce.call_count)
-
- def test_set_hostname_option_can_disable_hostname_set(self):
- cfg = {'set_hostname': False, 'hostname_bounce': {'policy': 'force'}}
- data = self.get_ovf_env_with_dscfg('some-hostname', cfg)
- self._get_ds(data).get_data()
-
- self.assertEqual(0, self.set_hostname.call_count)
-
- @mock.patch(MOCKPATH + 'perform_hostname_bounce')
- def test_set_hostname_failed_disable_bounce(
- self, perform_hostname_bounce):
- cfg = {'set_hostname': True, 'hostname_bounce': {'policy': 'force'}}
- self.get_hostname.return_value = "old-hostname"
- self.set_hostname.side_effect = Exception
- data = self.get_ovf_env_with_dscfg('some-hostname', cfg)
- self._get_ds(data).get_data()
-
- self.assertEqual(0, perform_hostname_bounce.call_count)
-
-
-class TestLoadAzureDsDir(CiTestCase):
- """Tests for load_azure_ds_dir."""
-
- def setUp(self):
- self.source_dir = self.tmp_dir()
- super(TestLoadAzureDsDir, self).setUp()
-
- def test_missing_ovf_env_xml_raises_non_azure_datasource_error(self):
- """load_azure_ds_dir raises an error When ovf-env.xml doesn't exit."""
- with self.assertRaises(dsaz.NonAzureDataSource) as context_manager:
- dsaz.load_azure_ds_dir(self.source_dir)
- self.assertEqual(
- 'No ovf-env file found',
- str(context_manager.exception))
-
- def test_wb_invalid_ovf_env_xml_calls_read_azure_ovf(self):
- """load_azure_ds_dir calls read_azure_ovf to parse the xml."""
- ovf_path = os.path.join(self.source_dir, 'ovf-env.xml')
- with open(ovf_path, 'wb') as stream:
- stream.write(b'invalid xml')
- with self.assertRaises(dsaz.BrokenAzureDataSource) as context_manager:
- dsaz.load_azure_ds_dir(self.source_dir)
- self.assertEqual(
- 'Invalid ovf-env.xml: syntax error: line 1, column 0',
- str(context_manager.exception))
-
-
-class TestReadAzureOvf(CiTestCase):
-
- def test_invalid_xml_raises_non_azure_ds(self):
- invalid_xml = "<foo>" + construct_valid_ovf_env(data={})
- self.assertRaises(dsaz.BrokenAzureDataSource,
- dsaz.read_azure_ovf, invalid_xml)
-
- def test_load_with_pubkeys(self):
- mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}]
- pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist]
- content = construct_valid_ovf_env(pubkeys=pubkeys)
- (_md, _ud, cfg) = dsaz.read_azure_ovf(content)
- for mypk in mypklist:
- self.assertIn(mypk, cfg['_pubkeys'])
-
-
-class TestCanDevBeReformatted(CiTestCase):
- warning_file = 'dataloss_warning_readme.txt'
-
- def _domock(self, mockpath, sattr=None):
- patcher = mock.patch(mockpath)
- setattr(self, sattr, patcher.start())
- self.addCleanup(patcher.stop)
-
- def patchup(self, devs):
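- # 'devs' describes a fake disk layout: each device dict may carry a
- # 'realpath' (symlink target), an 'fs' type, a list of 'files', and
- # nested 'partitions'. The mocks installed below answer queries from
- # this dict so can_dev_be_reformatted() sees exactly this layout.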
- bypath = {}
- for path, data in devs.items():
- bypath[path] = data
- if 'realpath' in data:
- bypath[data['realpath']] = data
- for ppath, pdata in data.get('partitions', {}).items():
- bypath[ppath] = pdata
- if 'realpath' in data:
- bypath[pdata['realpath']] = pdata
-
- def realpath(d):
- return bypath[d].get('realpath', d)
-
- def partitions_on_device(devpath):
- parts = bypath.get(devpath, {}).get('partitions', {})
- ret = []
- for path, data in parts.items():
- ret.append((data.get('num'), realpath(path)))
- # return sorted by partition number
- return sorted(ret, key=lambda d: d[0])
-
- def mount_cb(device, callback, mtype, update_env_for_mount):
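- # Simulate util.mount_cb: instead of mounting, populate a temp dir
- # with the device's fake files and hand it to the callback.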
- self.assertEqual('ntfs', mtype)
- self.assertEqual('C', update_env_for_mount.get('LANG'))
- p = self.tmp_dir()
- for f in bypath.get(device).get('files', []):
- write_file(os.path.join(p, f), content=f)
- return callback(p)
-
- def has_ntfs_fs(device):
- return bypath.get(device, {}).get('fs') == 'ntfs'
-
- p = MOCKPATH
- self._domock(p + "_partitions_on_device", 'm_partitions_on_device')
- self._domock(p + "_has_ntfs_filesystem", 'm_has_ntfs_filesystem')
- self._domock(p + "util.mount_cb", 'm_mount_cb')
- self._domock(p + "os.path.realpath", 'm_realpath')
- self._domock(p + "os.path.exists", 'm_exists')
- self._domock(p + "util.SeLinuxGuard", 'm_selguard')
-
- self.m_exists.side_effect = lambda p: p in bypath
- self.m_realpath.side_effect = realpath
- self.m_has_ntfs_filesystem.side_effect = has_ntfs_fs
- self.m_mount_cb.side_effect = mount_cb
- self.m_partitions_on_device.side_effect = partitions_on_device
- self.m_selguard.__enter__ = mock.Mock(return_value=False)
- self.m_selguard.__exit__ = mock.Mock()
-
- def test_three_partitions_is_false(self):
- """A disk with 3 partitions can not be formatted."""
- self.patchup({
- '/dev/sda': {
- 'partitions': {
- '/dev/sda1': {'num': 1},
- '/dev/sda2': {'num': 2},
- '/dev/sda3': {'num': 3},
- }}})
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
- preserve_ntfs=False)
- self.assertFalse(value)
- self.assertIn("3 or more", msg.lower())
-
- def test_no_partitions_is_false(self):
- """A disk with no partitions can not be formatted."""
- self.patchup({'/dev/sda': {}})
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
- preserve_ntfs=False)
- self.assertFalse(value)
- self.assertIn("not partitioned", msg.lower())
-
- def test_two_partitions_not_ntfs_false(self):
- """2 partitions and 2nd not ntfs can not be formatted."""
- self.patchup({
- '/dev/sda': {
- 'partitions': {
- '/dev/sda1': {'num': 1},
- '/dev/sda2': {'num': 2, 'fs': 'ext4', 'files': []},
- }}})
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
- preserve_ntfs=False)
- self.assertFalse(value)
- self.assertIn("not ntfs", msg.lower())
-
- def test_two_partitions_ntfs_populated_false(self):
- """2 partitions and populated ntfs fs on 2nd can not be formatted."""
- self.patchup({
- '/dev/sda': {
- 'partitions': {
- '/dev/sda1': {'num': 1},
- '/dev/sda2': {'num': 2, 'fs': 'ntfs',
- 'files': ['secret.txt']},
- }}})
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
- preserve_ntfs=False)
- self.assertFalse(value)
- self.assertIn("files on it", msg.lower())
-
- def test_two_partitions_ntfs_empty_is_true(self):
- """2 partitions and empty ntfs fs on 2nd can be formatted."""
- self.patchup({
- '/dev/sda': {
- 'partitions': {
- '/dev/sda1': {'num': 1},
- '/dev/sda2': {'num': 2, 'fs': 'ntfs', 'files': []},
- }}})
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
- preserve_ntfs=False)
- self.assertTrue(value)
- self.assertIn("safe for", msg.lower())
-
- def test_one_partition_not_ntfs_false(self):
- """1 partition witih fs other than ntfs can not be formatted."""
- self.patchup({
- '/dev/sda': {
- 'partitions': {
- '/dev/sda1': {'num': 1, 'fs': 'zfs'},
- }}})
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
- preserve_ntfs=False)
- self.assertFalse(value)
- self.assertIn("not ntfs", msg.lower())
-
- def test_one_partition_ntfs_populated_false(self):
- """1 mountable ntfs partition with many files can not be formatted."""
- self.patchup({
- '/dev/sda': {
- 'partitions': {
- '/dev/sda1': {'num': 1, 'fs': 'ntfs',
- 'files': ['file1.txt', 'file2.exe']},
- }}})
- with mock.patch.object(dsaz.LOG, 'warning') as warning:
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
- preserve_ntfs=False)
- wmsg = warning.call_args[0][0]
- self.assertIn("looks like you're using NTFS on the ephemeral disk",
- wmsg)
- self.assertFalse(value)
- self.assertIn("files on it", msg.lower())
-
- def test_one_partition_ntfs_empty_is_true(self):
- """1 mountable ntfs partition and no files can be formatted."""
- self.patchup({
- '/dev/sda': {
- 'partitions': {
- '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []}
- }}})
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
- preserve_ntfs=False)
- self.assertTrue(value)
- self.assertIn("safe for", msg.lower())
-
- def test_one_partition_ntfs_empty_with_dataloss_file_is_true(self):
- """1 mountable ntfs partition and only warn file can be formatted."""
- self.patchup({
- '/dev/sda': {
- 'partitions': {
- '/dev/sda1': {'num': 1, 'fs': 'ntfs',
- 'files': ['dataloss_warning_readme.txt']}
- }}})
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
- preserve_ntfs=False)
- self.assertTrue(value)
- self.assertIn("safe for", msg.lower())
-
- def test_one_partition_through_realpath_is_true(self):
- """A symlink to a device with 1 ntfs partition can be formatted."""
- epath = '/dev/disk/cloud/azure_resource'
- self.patchup({
- epath: {
- 'realpath': '/dev/sdb',
- 'partitions': {
- epath + '-part1': {
- 'num': 1, 'fs': 'ntfs', 'files': [self.warning_file],
- 'realpath': '/dev/sdb1'}
- }}})
- value, msg = dsaz.can_dev_be_reformatted(epath,
- preserve_ntfs=False)
- self.assertTrue(value)
- self.assertIn("safe for", msg.lower())
-
- def test_three_partition_through_realpath_is_false(self):
- """A symlink to a device with 3 partitions can not be formatted."""
- epath = '/dev/disk/cloud/azure_resource'
- self.patchup({
- epath: {
- 'realpath': '/dev/sdb',
- 'partitions': {
- epath + '-part1': {
- 'num': 1, 'fs': 'ntfs', 'files': [self.warning_file],
- 'realpath': '/dev/sdb1'},
- epath + '-part2': {'num': 2, 'fs': 'ext3',
- 'realpath': '/dev/sdb2'},
- epath + '-part3': {'num': 3, 'fs': 'ext',
- 'realpath': '/dev/sdb3'}
- }}})
- value, msg = dsaz.can_dev_be_reformatted(epath,
- preserve_ntfs=False)
- self.assertFalse(value)
- self.assertIn("3 or more", msg.lower())
-
- def test_ntfs_mount_errors_true(self):
- """can_dev_be_reformatted does not fail if NTFS is unknown fstype."""
- self.patchup({
- '/dev/sda': {
- 'partitions': {
- '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []}
- }}})
-
- error_msgs = [
- "Stderr: mount: unknown filesystem type 'ntfs'", # RHEL
- "Stderr: mount: /dev/sdb1: unknown filesystem type 'ntfs'" # SLES
- ]
-
- for err_msg in error_msgs:
- self.m_mount_cb.side_effect = MountFailedError(
- "Failed mounting %s to %s due to: \nUnexpected.\n%s" %
- ('/dev/sda', '/fake-tmp/dir', err_msg))
-
- value, msg = dsaz.can_dev_be_reformatted('/dev/sda',
- preserve_ntfs=False)
- self.assertTrue(value)
- self.assertIn('cannot mount NTFS, assuming', msg)
-
- def test_never_destroy_ntfs_config_false(self):
- """Normally formattable situation with never_destroy_ntfs set."""
- self.patchup({
- '/dev/sda': {
- 'partitions': {
- '/dev/sda1': {'num': 1, 'fs': 'ntfs',
- 'files': ['dataloss_warning_readme.txt']}
- }}})
- value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
- preserve_ntfs=True)
- self.assertFalse(value)
- self.assertIn("config says to never destroy NTFS "
- "(datasource.Azure.never_destroy_ntfs)", msg)
-
-
-class TestClearCachedData(CiTestCase):
-
- def test_clear_cached_attrs_clears_imds(self):
- """All class attributes are reset to defaults, including imds data."""
- tmp = self.tmp_dir()
- paths = helpers.Paths(
- {'cloud_dir': tmp, 'run_dir': tmp})
- dsrc = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=paths)
- clean_values = [dsrc.metadata, dsrc.userdata, dsrc._metadata_imds]
- dsrc.metadata = 'md'
- dsrc.userdata = 'ud'
- dsrc._metadata_imds = 'imds'
- dsrc._dirty_cache = True
- dsrc.clear_cached_attrs()
- self.assertEqual(
- [dsrc.metadata, dsrc.userdata, dsrc._metadata_imds],
- clean_values)
-
-
-class TestAzureNetExists(CiTestCase):
-
- def test_azure_net_must_exist_for_legacy_objpkl(self):
- """DataSourceAzureNet must exist for old obj.pkl files
- that reference it."""
- self.assertTrue(hasattr(dsaz, "DataSourceAzureNet"))
-
-
-class TestPreprovisioningReadAzureOvfFlag(CiTestCase):
-
- def test_read_azure_ovf_with_true_flag(self):
- """The read_azure_ovf method should set the PreprovisionedVM
- cfg flag if the proper setting is present."""
- content = construct_valid_ovf_env(
- platform_settings={"PreprovisionedVm": "True"})
- ret = dsaz.read_azure_ovf(content)
- cfg = ret[2]
- self.assertTrue(cfg['PreprovisionedVm'])
-
- def test_read_azure_ovf_with_false_flag(self):
- """The read_azure_ovf method should set the PreprovisionedVM
- cfg flag to false if the proper setting is false."""
- content = construct_valid_ovf_env(
- platform_settings={"PreprovisionedVm": "False"})
- ret = dsaz.read_azure_ovf(content)
- cfg = ret[2]
- self.assertFalse(cfg['PreprovisionedVm'])
-
- def test_read_azure_ovf_without_flag(self):
- """The read_azure_ovf method should not set the
- PreprovisionedVM cfg flag."""
- content = construct_valid_ovf_env()
- ret = dsaz.read_azure_ovf(content)
- cfg = ret[2]
- self.assertFalse(cfg['PreprovisionedVm'])
- self.assertEqual(None, cfg["PreprovisionedVMType"])
-
- def test_read_azure_ovf_with_running_type(self):
- """The read_azure_ovf method should set PreprovisionedVMType
- cfg flag to Running."""
- content = construct_valid_ovf_env(
- platform_settings={"PreprovisionedVMType": "Running",
- "PreprovisionedVm": "True"})
- ret = dsaz.read_azure_ovf(content)
- cfg = ret[2]
- self.assertTrue(cfg['PreprovisionedVm'])
- self.assertEqual("Running", cfg['PreprovisionedVMType'])
-
- def test_read_azure_ovf_with_savable_type(self):
- """The read_azure_ovf method should set PreprovisionedVMType
- cfg flag to Savable."""
- content = construct_valid_ovf_env(
- platform_settings={"PreprovisionedVMType": "Savable",
- "PreprovisionedVm": "True"})
- ret = dsaz.read_azure_ovf(content)
- cfg = ret[2]
- self.assertTrue(cfg['PreprovisionedVm'])
- self.assertEqual("Savable", cfg['PreprovisionedVMType'])
-
-
-@mock.patch('os.path.isfile')
-class TestPreprovisioningShouldReprovision(CiTestCase):
-
- def setUp(self):
- super(TestPreprovisioningShouldReprovision, self).setUp()
- tmp = self.tmp_dir()
- self.waagent_d = self.tmp_path('/var/lib/waagent', tmp)
- self.paths = helpers.Paths({'cloud_dir': tmp})
- dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
-
- @mock.patch(MOCKPATH + 'util.write_file')
- def test__should_reprovision_with_true_cfg(self, isfile, write_f):
- """The _should_reprovision method should return true with config
- flag present."""
- isfile.return_value = False
- dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
- self.assertTrue(dsa._should_reprovision(
- (None, None, {'PreprovisionedVm': True}, None)))
-
- def test__should_reprovision_with_file_existing(self, isfile):
- """The _should_reprovision method should return True if the sentinal
- exists."""
- isfile.return_value = True
- dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
- self.assertTrue(dsa._should_reprovision(
- (None, None, {'preprovisionedvm': False}, None)))
-
- def test__should_reprovision_returns_false(self, isfile):
- """The _should_reprovision method should return False
- if config and sentinel are not present."""
- isfile.return_value = False
- dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
- self.assertFalse(dsa._should_reprovision((None, None, {}, None)))
-
- @mock.patch(MOCKPATH + 'DataSourceAzure._poll_imds')
- def test_reprovision_calls__poll_imds(self, _poll_imds, isfile):
- """_reprovision will poll IMDS."""
- isfile.return_value = False
- hostname = "myhost"
- username = "myuser"
- odata = {'HostName': hostname, 'UserName': username}
- _poll_imds.return_value = construct_valid_ovf_env(data=odata)
- dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
- dsa._reprovision()
- _poll_imds.assert_called_with()
-
-
-class TestPreprovisioningHotAttachNics(CiTestCase):
-
- def setUp(self):
- super(TestPreprovisioningHotAttachNics, self).setUp()
- self.tmp = self.tmp_dir()
- self.waagent_d = self.tmp_path('/var/lib/waagent', self.tmp)
- self.paths = helpers.Paths({'cloud_dir': self.tmp})
- dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
-
- @mock.patch('cloudinit.sources.helpers.netlink.wait_for_nic_detach_event',
- autospec=True)
- @mock.patch(MOCKPATH + 'util.write_file', autospec=True)
- def test_nic_detach_writes_marker(self, m_writefile, m_detach):
- """When we detect that a nic gets detached, we write a marker for it"""
- dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
- nl_sock = mock.MagicMock()
- dsa._wait_for_nic_detach(nl_sock)
- m_detach.assert_called_with(nl_sock)
- self.assertEqual(1, m_detach.call_count)
- m_writefile.assert_called_with(
- dsaz.REPROVISION_NIC_DETACHED_MARKER_FILE, mock.ANY)
-
- @mock.patch(MOCKPATH + 'util.write_file', autospec=True)
- @mock.patch(MOCKPATH + 'DataSourceAzure.fallback_interface')
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting')
- @mock.patch(MOCKPATH + 'DataSourceAzure._report_ready')
- @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach')
- def test_detect_nic_attach_reports_ready_and_waits_for_detach(
- self, m_detach, m_report_ready, m_dhcp, m_fallback_if,
- m_writefile):
- """Report ready first and then wait for nic detach"""
- dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
- dsa._wait_for_all_nics_ready()
- m_fallback_if.return_value = "Dummy interface"
- self.assertEqual(1, m_report_ready.call_count)
- self.assertEqual(1, m_detach.call_count)
- self.assertEqual(1, m_writefile.call_count)
- self.assertEqual(1, m_dhcp.call_count)
- m_writefile.assert_called_with(dsaz.REPORTED_READY_MARKER_FILE,
- mock.ANY)
-
- @mock.patch('os.path.isfile')
- @mock.patch(MOCKPATH + 'DataSourceAzure.fallback_interface')
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting')
- @mock.patch(MOCKPATH + 'DataSourceAzure._report_ready')
- @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach')
- def test_detect_nic_attach_skips_report_ready_when_marker_present(
- self, m_detach, m_report_ready, m_dhcp, m_fallback_if, m_isfile):
- """Skip reporting ready if we already have a marker file."""
- dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
-
- def isfile(key):
- return key == dsaz.REPORTED_READY_MARKER_FILE
-
- m_isfile.side_effect = isfile
- dsa._wait_for_all_nics_ready()
- m_fallback_if.return_value = "Dummy interface"
- self.assertEqual(0, m_report_ready.call_count)
- self.assertEqual(0, m_dhcp.call_count)
- self.assertEqual(1, m_detach.call_count)
-
- @mock.patch('os.path.isfile')
- @mock.patch(MOCKPATH + 'DataSourceAzure.fallback_interface')
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting')
- @mock.patch(MOCKPATH + 'DataSourceAzure._report_ready')
- @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach')
- def test_detect_nic_attach_skips_nic_detach_when_marker_present(
- self, m_detach, m_report_ready, m_dhcp, m_fallback_if, m_isfile):
- """Skip wait for nic detach if it already happened."""
- dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
-
- m_isfile.return_value = True
- dsa._wait_for_all_nics_ready()
- m_fallback_if.return_value = "Dummy interface"
- self.assertEqual(0, m_report_ready.call_count)
- self.assertEqual(0, m_dhcp.call_count)
- self.assertEqual(0, m_detach.call_count)
-
- @mock.patch(MOCKPATH + 'DataSourceAzure.wait_for_link_up', autospec=True)
- @mock.patch('cloudinit.sources.helpers.netlink.wait_for_nic_attach_event')
- @mock.patch('cloudinit.sources.net.find_fallback_nic')
- @mock.patch(MOCKPATH + 'get_metadata_from_imds')
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
- @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach')
- @mock.patch('os.path.isfile')
- def test_wait_for_nic_attach_if_no_fallback_interface(
- self, m_isfile, m_detach, m_dhcpv4, m_imds, m_fallback_if,
- m_attach, m_link_up):
- """Wait for nic attach if we do not have a fallback interface"""
- dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
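- # The lease below carries 'unknown-245' (DHCP option 245), the option
- # Azure uses to advertise the wireserver endpoint for reporting ready.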
- lease = {
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'unknown-245': '624c3620'}
-
- m_isfile.return_value = True
- m_attach.return_value = "eth0"
- dhcp_ctx = mock.MagicMock(lease=lease)
- dhcp_ctx.obtain_lease.return_value = lease
- m_dhcpv4.return_value = dhcp_ctx
- m_imds.return_value = IMDS_NETWORK_METADATA
- m_fallback_if.return_value = None
-
- dsa._wait_for_all_nics_ready()
-
- self.assertEqual(0, m_detach.call_count)
- self.assertEqual(1, m_attach.call_count)
- self.assertEqual(1, m_dhcpv4.call_count)
- self.assertEqual(1, m_imds.call_count)
- self.assertEqual(1, m_link_up.call_count)
- m_link_up.assert_called_with(mock.ANY, "eth0")
-
- @mock.patch(MOCKPATH + 'DataSourceAzure.wait_for_link_up')
- @mock.patch('cloudinit.sources.helpers.netlink.wait_for_nic_attach_event')
- @mock.patch('cloudinit.sources.net.find_fallback_nic')
- @mock.patch(MOCKPATH + 'get_metadata_from_imds')
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
- @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach')
- @mock.patch('os.path.isfile')
- def test_wait_for_nic_attach_multinic_attach(
- self, m_isfile, m_detach, m_dhcpv4, m_imds, m_fallback_if,
- m_attach, m_link_up):
- """Wait for nic attach if we do not have a fallback interface"""
- dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
- lease = {
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'unknown-245': '624c3620'}
- m_attach_call_count = 0
-
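- # Simulate two hot-attach events: the first netlink event reports eth0,
- # the second eth1. IMDS metadata only resolves for eth0, so it should be
- # treated as the primary NIC.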
- def nic_attach_ret(nl_sock, nics_found):
- nonlocal m_attach_call_count
- if m_attach_call_count == 0:
- m_attach_call_count = m_attach_call_count + 1
- return "eth0"
- return "eth1"
-
- def network_metadata_ret(ifname, retries, type):
- # Simulate two NICs by adding the same one twice.
- md = IMDS_NETWORK_METADATA
- md['interface'].append(md['interface'][0])
- if ifname == "eth0":
- return md
- raise requests.Timeout('Fake connection timeout')
-
- m_isfile.return_value = True
- m_attach.side_effect = nic_attach_ret
- dhcp_ctx = mock.MagicMock(lease=lease)
- dhcp_ctx.obtain_lease.return_value = lease
- m_dhcpv4.return_value = dhcp_ctx
- m_imds.side_effect = network_metadata_ret
- m_fallback_if.return_value = None
-
- dsa._wait_for_all_nics_ready()
-
- self.assertEqual(0, m_detach.call_count)
- self.assertEqual(2, m_attach.call_count)
- # DHCP and network metadata calls will only happen on the primary NIC.
- self.assertEqual(1, m_dhcpv4.call_count)
- self.assertEqual(1, m_imds.call_count)
- self.assertEqual(2, m_link_up.call_count)
-
- @mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up')
- def test_wait_for_link_up_returns_if_already_up(
- self, m_is_link_up):
- """Waiting for link to be up should return immediately if the link is
- already up."""
-
- distro_cls = distros.fetch('ubuntu')
- distro = distro_cls('ubuntu', {}, self.paths)
- dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
- m_is_link_up.return_value = True
-
- dsa.wait_for_link_up("eth0")
- self.assertEqual(1, m_is_link_up.call_count)
-
- @mock.patch(MOCKPATH + 'util.write_file')
- @mock.patch('cloudinit.net.read_sys_net')
- @mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up')
- def test_wait_for_link_up_writes_to_device_file(
- self, m_is_link_up, m_read_sys_net, m_writefile):
- """Waiting for link to be up should return immediately if the link is
- already up."""
-
- distro_cls = distros.fetch('ubuntu')
- distro = distro_cls('ubuntu', {}, self.paths)
- dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
-
- callcount = 0
-
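- # Report the link down on the first check and up on the second, forcing
- # wait_for_link_up to write to the device files before it succeeds.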
- def linkup(key):
- nonlocal callcount
- if callcount == 0:
- callcount += 1
- return False
- return True
-
- m_is_link_up.side_effect = linkup
-
- dsa.wait_for_link_up("eth0")
- self.assertEqual(2, m_is_link_up.call_count)
- self.assertEqual(1, m_read_sys_net.call_count)
- self.assertEqual(2, m_writefile.call_count)
-
- @mock.patch('cloudinit.sources.helpers.netlink.'
- 'create_bound_netlink_socket')
- def test_wait_for_all_nics_ready_raises_if_socket_fails(self, m_socket):
- """Waiting for all nics should raise exception if netlink socket
- creation fails."""
-
- m_socket.side_effect = netlink.NetlinkCreateSocketError
- distro_cls = distros.fetch('ubuntu')
- distro = distro_cls('ubuntu', {}, self.paths)
- dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
-
- self.assertRaises(netlink.NetlinkCreateSocketError,
- dsa._wait_for_all_nics_ready)
-
-
-@mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
-@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
-@mock.patch('cloudinit.sources.helpers.netlink.'
- 'wait_for_media_disconnect_connect')
-@mock.patch('requests.Session.request')
-@mock.patch(MOCKPATH + 'DataSourceAzure._report_ready')
-class TestPreprovisioningPollIMDS(CiTestCase):
-
- def setUp(self):
- super(TestPreprovisioningPollIMDS, self).setUp()
- self.tmp = self.tmp_dir()
- self.waagent_d = self.tmp_path('/var/lib/waagent', self.tmp)
- self.paths = helpers.Paths({'cloud_dir': self.tmp})
- dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
-
- @mock.patch('time.sleep', mock.MagicMock())
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
- def test_poll_imds_re_dhcp_on_timeout(self, m_dhcpv4, m_report_ready,
- m_request, m_media_switch, m_dhcp,
- m_net):
- """The poll_imds will retry DHCP on IMDS timeout."""
- report_file = self.tmp_path('report_marker', self.tmp)
- lease = {
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'unknown-245': '624c3620'}
- m_dhcp.return_value = [lease]
- m_media_switch.return_value = None
- dhcp_ctx = mock.MagicMock(lease=lease)
- dhcp_ctx.obtain_lease.return_value = lease
- m_dhcpv4.return_value = dhcp_ctx
-
- self.tries = 0
-
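- # Fake IMDS behaviour: a connection timeout, then HTTP 404 and 410
- # errors, then success; the assertions below expect 4 IMDS reads and
- # 3 DHCP setups in total.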
- def fake_timeout_once(**kwargs):
- self.tries += 1
- if self.tries == 1:
- raise requests.Timeout('Fake connection timeout')
- elif self.tries in (2, 3):
- response = requests.Response()
- response.status_code = 404 if self.tries == 2 else 410
- raise requests.exceptions.HTTPError(
- "fake {}".format(response.status_code), response=response
- )
- # The fourth attempt succeeds, stopping further retries or re-dhcp.
- return mock.MagicMock(status_code=200, text="good", content="good")
-
- m_request.side_effect = fake_timeout_once
-
- dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
- with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
- dsa._poll_imds()
- self.assertEqual(m_report_ready.call_count, 1)
- m_report_ready.assert_called_with(lease=lease)
- self.assertEqual(3, m_dhcpv4.call_count, 'Expected 3 DHCP calls')
- self.assertEqual(4, self.tries, 'Expected 4 total reads from IMDS')
-
- @mock.patch('os.path.isfile')
- def test_poll_imds_skips_dhcp_if_ctx_present(
- self, m_isfile, report_ready_func, fake_resp, m_media_switch,
- m_dhcp, m_net):
- """The poll_imds function should reuse the dhcp ctx if it is already
- present. This happens when we wait for nic to be hot-attached before
- polling for reprovisiondata. Note that if this ctx is set when
- _poll_imds is called, then it is not expected to be waiting for
- media_disconnect_connect either."""
- report_file = self.tmp_path('report_marker', self.tmp)
- m_isfile.return_value = True
- dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
- dsa._ephemeral_dhcp_ctx = "Dummy dhcp ctx"
- with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
- dsa._poll_imds()
- self.assertEqual(0, m_dhcp.call_count)
- self.assertEqual(0, m_media_switch.call_count)
-
- def test_does_not_poll_imds_report_ready_when_marker_file_exists(
- self, m_report_ready, m_request, m_media_switch, m_dhcp, m_net):
- """poll_imds should not call report ready when the reported ready
- marker file exists"""
- report_file = self.tmp_path('report_marker', self.tmp)
- write_file(report_file, content='dont run report_ready :)')
- m_dhcp.return_value = [{
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'unknown-245': '624c3620'}]
- m_media_switch.return_value = None
- dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
- with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
- dsa._poll_imds()
- self.assertEqual(m_report_ready.call_count, 0)
-
- def test_poll_imds_report_ready_success_writes_marker_file(
- self, m_report_ready, m_request, m_media_switch, m_dhcp, m_net):
- """poll_imds should write the report_ready marker file if
- reporting ready succeeds"""
- report_file = self.tmp_path('report_marker', self.tmp)
- m_dhcp.return_value = [{
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'unknown-245': '624c3620'}]
- m_media_switch.return_value = None
- dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
- self.assertFalse(os.path.exists(report_file))
- with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
- dsa._poll_imds()
- self.assertEqual(m_report_ready.call_count, 1)
- self.assertTrue(os.path.exists(report_file))
-
- def test_poll_imds_report_ready_failure_raises_exc_and_doesnt_write_marker(
- self, m_report_ready, m_request, m_media_switch, m_dhcp, m_net):
- """poll_imds should write the report_ready marker file if
- reporting ready succeeds"""
- report_file = self.tmp_path('report_marker', self.tmp)
- m_dhcp.return_value = [{
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'unknown-245': '624c3620'}]
- m_media_switch.return_value = None
- m_report_ready.return_value = False
- dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
- self.assertFalse(os.path.exists(report_file))
- with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
- self.assertRaises(
- InvalidMetaDataException,
- dsa._poll_imds)
- self.assertEqual(m_report_ready.call_count, 1)
- self.assertFalse(os.path.exists(report_file))
-
-
-@mock.patch(MOCKPATH + 'DataSourceAzure._report_ready', mock.MagicMock())
-@mock.patch(MOCKPATH + 'subp.subp', mock.MagicMock())
-@mock.patch(MOCKPATH + 'util.write_file', mock.MagicMock())
-@mock.patch(MOCKPATH + 'util.is_FreeBSD')
-@mock.patch('cloudinit.sources.helpers.netlink.'
- 'wait_for_media_disconnect_connect')
-@mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network', autospec=True)
-@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
-@mock.patch('requests.Session.request')
-class TestAzureDataSourcePreprovisioning(CiTestCase):
-
- def setUp(self):
- super(TestAzureDataSourcePreprovisioning, self).setUp()
- tmp = self.tmp_dir()
- self.waagent_d = self.tmp_path('/var/lib/waagent', tmp)
- self.paths = helpers.Paths({'cloud_dir': tmp})
- dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
-
- def test_poll_imds_returns_ovf_env(self, m_request,
- m_dhcp, m_net,
- m_media_switch,
- m_is_bsd):
- """The _poll_imds method should return the ovf_env.xml."""
- m_is_bsd.return_value = False
- m_media_switch.return_value = None
- m_dhcp.return_value = [{
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0'}]
- url = 'http://{0}/metadata/reprovisiondata?api-version=2019-06-01'
- host = "169.254.169.254"
- full_url = url.format(host)
- m_request.return_value = mock.MagicMock(status_code=200, text="ovf",
- content="ovf")
- dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
- self.assertTrue(len(dsa._poll_imds()) > 0)
- self.assertEqual(m_request.call_args_list,
- [mock.call(allow_redirects=True,
- headers={'Metadata': 'true',
- 'User-Agent':
- 'Cloud-Init/%s' % vs()
- }, method='GET',
- timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS,
- url=full_url)])
- self.assertEqual(m_dhcp.call_count, 2)
- m_net.assert_any_call(
- broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9',
- prefix_or_mask='255.255.255.0', router='192.168.2.1',
- static_routes=None)
- self.assertEqual(m_net.call_count, 2)
-
- def test__reprovision_calls__poll_imds(self, m_request,
- m_dhcp, m_net,
- m_media_switch,
- m_is_bsd):
- """The _reprovision method should call poll IMDS."""
- m_is_bsd.return_value = False
- m_media_switch.return_value = None
- m_dhcp.return_value = [{
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'unknown-245': '624c3620'}]
- url = 'http://{0}/metadata/reprovisiondata?api-version=2019-06-01'
- host = "169.254.169.254"
- full_url = url.format(host)
- hostname = "myhost"
- username = "myuser"
- odata = {'HostName': hostname, 'UserName': username}
- content = construct_valid_ovf_env(data=odata)
- m_request.return_value = mock.MagicMock(status_code=200, text=content,
- content=content)
- dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
- md, _ud, cfg, _d = dsa._reprovision()
- self.assertEqual(md['local-hostname'], hostname)
- self.assertEqual(cfg['system_info']['default_user']['name'], username)
- self.assertIn(
- mock.call(
- allow_redirects=True,
- headers={
- 'Metadata': 'true',
- 'User-Agent': 'Cloud-Init/%s' % vs()
- },
- method='GET',
- timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS,
- url=full_url
- ),
- m_request.call_args_list)
- self.assertEqual(m_dhcp.call_count, 2)
- m_net.assert_any_call(
- broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9',
- prefix_or_mask='255.255.255.0', router='192.168.2.1',
- static_routes=None)
- self.assertEqual(m_net.call_count, 2)
-
-
-class TestRemoveUbuntuNetworkConfigScripts(CiTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestRemoveUbuntuNetworkConfigScripts, self).setUp()
- self.tmp = self.tmp_dir()
-
- def test_remove_network_scripts_removes_both_files_and_directories(self):
- """Any files or directories in paths are removed when present."""
- file1 = self.tmp_path('file1', dir=self.tmp)
- subdir = self.tmp_path('sub1', dir=self.tmp)
- subfile = self.tmp_path('leaf1', dir=subdir)
- write_file(file1, 'file1content')
- write_file(subfile, 'leafcontent')
- dsaz.maybe_remove_ubuntu_network_config_scripts(paths=[subdir, file1])
-
- for path in (file1, subdir, subfile):
- self.assertFalse(os.path.exists(path),
- 'Found unremoved: %s' % path)
-
- expected_logs = [
- 'INFO: Removing Ubuntu extended network scripts because cloud-init'
- ' updates Azure network configuration on the following event:'
- ' System boot.',
- 'Recursively deleting %s' % subdir,
- 'Attempting to remove %s' % file1]
- for log in expected_logs:
- self.assertIn(log, self.logs.getvalue())
-
- def test_remove_network_scripts_only_attempts_removal_if_path_exists(self):
- """Any files or directories absent are skipped without error."""
- dsaz.maybe_remove_ubuntu_network_config_scripts(paths=[
- self.tmp_path('nodirhere/', dir=self.tmp),
- self.tmp_path('notfilehere', dir=self.tmp)])
- self.assertNotIn('/not/a', self.logs.getvalue()) # No delete logs
-
- @mock.patch(MOCKPATH + 'os.path.exists')
- def test_remove_network_scripts_default_removes_stock_scripts(self,
- m_exists):
- """Azure's stock ubuntu image scripts and artifacts are removed."""
- # Report path absent on all to avoid delete operation
- m_exists.return_value = False
- dsaz.maybe_remove_ubuntu_network_config_scripts()
- calls = m_exists.call_args_list
- for path in dsaz.UBUNTU_EXTENDED_NETWORK_SCRIPTS:
- self.assertIn(mock.call(path), calls)
-
-
-class TestWBIsPlatformViable(CiTestCase):
- """White box tests for _is_platform_viable."""
- with_logs = True
-
- @mock.patch(MOCKPATH + 'dmi.read_dmi_data')
- def test_true_on_azure_chassis(self, m_read_dmi_data):
- """Return True if DMI chassis-asset-tag is AZURE_CHASSIS_ASSET_TAG."""
- m_read_dmi_data.return_value = dsaz.AZURE_CHASSIS_ASSET_TAG
- self.assertTrue(dsaz._is_platform_viable('doesnotmatter'))
-
- @mock.patch(MOCKPATH + 'os.path.exists')
- @mock.patch(MOCKPATH + 'dmi.read_dmi_data')
- def test_true_on_azure_ovf_env_in_seed_dir(self, m_read_dmi_data, m_exist):
- """Return True if ovf-env.xml exists in known seed dirs."""
- # Non-matching Azure chassis-asset-tag
- m_read_dmi_data.return_value = dsaz.AZURE_CHASSIS_ASSET_TAG + 'X'
-
- m_exist.return_value = True
- self.assertTrue(dsaz._is_platform_viable('/some/seed/dir'))
- m_exist.called_once_with('/other/seed/dir')
-
- def test_false_on_no_matching_azure_criteria(self):
- """Report non-azure on unmatched asset tag, ovf-env absent and no dev.
-
- Return False when the asset tag doesn't match Azure's static
- AZURE_CHASSIS_ASSET_TAG, no ovf-env.xml files exist in known seed dirs
- and no devices have a label starting with prefix 'rd_rdfe_'.
- """
- self.assertFalse(wrap_and_call(
- MOCKPATH,
- {'os.path.exists': False,
- # Non-matching Azure chassis-asset-tag
- 'dmi.read_dmi_data': dsaz.AZURE_CHASSIS_ASSET_TAG + 'X',
- 'subp.which': None},
- dsaz._is_platform_viable, 'doesnotmatter'))
- self.assertIn(
- "DEBUG: Non-Azure DMI asset tag '{0}' discovered.\n".format(
- dsaz.AZURE_CHASSIS_ASSET_TAG + 'X'),
- self.logs.getvalue())
-
-
-class TestRandomSeed(CiTestCase):
- """Test proper handling of random_seed"""
-
- def test_non_ascii_seed_is_serializable(self):
- """Pass if a random string from the Azure infrastructure which
- contains at least one non-Unicode character can be converted to/from
- JSON without alteration and without throwing an exception.
- """
- path = resourceLocation("azure/non_unicode_random_string")
- result = dsaz._get_random_seed(path)
-
- obj = {'seed': result}
- try:
- serialized = json_dumps(obj)
- deserialized = load_json(serialized)
- except UnicodeDecodeError:
- self.fail("Non-serializable random seed returned")
-
- self.assertEqual(deserialized['seed'], result)
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py
deleted file mode 100644
index 4ab5d471..00000000
--- a/tests/unittests/test_datasource/test_common.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit import settings
-from cloudinit import sources
-from cloudinit import type_utils
-from cloudinit.sources import (
- DataSource,
- DataSourceAliYun as AliYun,
- DataSourceAltCloud as AltCloud,
- DataSourceAzure as Azure,
- DataSourceBigstep as Bigstep,
- DataSourceCloudSigma as CloudSigma,
- DataSourceCloudStack as CloudStack,
- DataSourceConfigDrive as ConfigDrive,
- DataSourceDigitalOcean as DigitalOcean,
- DataSourceEc2 as Ec2,
- DataSourceExoscale as Exoscale,
- DataSourceGCE as GCE,
- DataSourceHetzner as Hetzner,
- DataSourceIBMCloud as IBMCloud,
- DataSourceMAAS as MAAS,
- DataSourceNoCloud as NoCloud,
- DataSourceOpenNebula as OpenNebula,
- DataSourceOpenStack as OpenStack,
- DataSourceOracle as Oracle,
- DataSourceOVF as OVF,
- DataSourceRbxCloud as RbxCloud,
- DataSourceScaleway as Scaleway,
- DataSourceSmartOS as SmartOS,
-)
-from cloudinit.sources import DataSourceNone as DSNone
-
-from cloudinit.tests import helpers as test_helpers
-
-DEFAULT_LOCAL = [
- Azure.DataSourceAzure,
- CloudSigma.DataSourceCloudSigma,
- ConfigDrive.DataSourceConfigDrive,
- DigitalOcean.DataSourceDigitalOcean,
- Hetzner.DataSourceHetzner,
- IBMCloud.DataSourceIBMCloud,
- NoCloud.DataSourceNoCloud,
- OpenNebula.DataSourceOpenNebula,
- Oracle.DataSourceOracle,
- OVF.DataSourceOVF,
- SmartOS.DataSourceSmartOS,
- Ec2.DataSourceEc2Local,
- OpenStack.DataSourceOpenStackLocal,
- RbxCloud.DataSourceRbxCloud,
- Scaleway.DataSourceScaleway,
-]
-
-DEFAULT_NETWORK = [
- AliYun.DataSourceAliYun,
- AltCloud.DataSourceAltCloud,
- Bigstep.DataSourceBigstep,
- CloudStack.DataSourceCloudStack,
- DSNone.DataSourceNone,
- Ec2.DataSourceEc2,
- Exoscale.DataSourceExoscale,
- GCE.DataSourceGCE,
- MAAS.DataSourceMAAS,
- NoCloud.DataSourceNoCloudNet,
- OpenStack.DataSourceOpenStack,
- OVF.DataSourceOVFNet,
-]
-
-
-class ExpectedDataSources(test_helpers.TestCase):
- builtin_list = settings.CFG_BUILTIN['datasource_list']
- deps_local = [sources.DEP_FILESYSTEM]
- deps_network = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK]
- pkg_list = [type_utils.obj_name(sources)]
-
- def test_expected_default_local_sources_found(self):
- found = sources.list_sources(
- self.builtin_list, self.deps_local, self.pkg_list)
- self.assertEqual(set(DEFAULT_LOCAL), set(found))
-
- def test_expected_default_network_sources_found(self):
- found = sources.list_sources(
- self.builtin_list, self.deps_network, self.pkg_list)
- self.assertEqual(set(DEFAULT_NETWORK), set(found))
-
- def test_expected_nondefault_network_sources_found(self):
- found = sources.list_sources(
- ['AliYun'], self.deps_network, self.pkg_list)
- self.assertEqual(set([AliYun.DataSourceAliYun]), set(found))
-
-
-class TestDataSourceInvariants(test_helpers.TestCase):
- def test_data_sources_have_valid_network_config_sources(self):
- for ds in DEFAULT_LOCAL + DEFAULT_NETWORK:
- for cfg_src in ds.network_config_sources:
- fail_msg = ('{} has an invalid network_config_sources entry:'
- ' {}'.format(str(ds), cfg_src))
- self.assertTrue(hasattr(sources.NetworkConfigSource, cfg_src),
- fail_msg)
-
- def test_expected_dsname_defined(self):
- for ds in DEFAULT_LOCAL + DEFAULT_NETWORK:
- fail_msg = (
- '{} has an invalid / missing dsname property: {}'.format(
- str(ds), str(ds.dsname)
- )
- )
- self.assertNotEqual(ds.dsname, DataSource.dsname, fail_msg)
- self.assertIsNotNone(ds.dsname)
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py
deleted file mode 100644
index 6f830cc6..00000000
--- a/tests/unittests/test_datasource/test_configdrive.py
+++ /dev/null
@@ -1,837 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from copy import copy, deepcopy
-import json
-import os
-
-from cloudinit import helpers
-from cloudinit.net import eni
-from cloudinit.net import network_state
-from cloudinit import settings
-from cloudinit.sources import DataSourceConfigDrive as ds
-from cloudinit.sources.helpers import openstack
-from cloudinit import util
-
-from cloudinit.tests.helpers import CiTestCase, ExitStack, mock, populate_dir
-
-
-PUBKEY = u'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n'
-EC2_META = {
- 'ami-id': 'ami-00000001',
- 'ami-launch-index': 0,
- 'ami-manifest-path': 'FIXME',
- 'block-device-mapping': {
- 'ami': 'sda1',
- 'ephemeral0': 'sda2',
- 'root': '/dev/sda1',
- 'swap': 'sda3'},
- 'hostname': 'sm-foo-test.novalocal',
- 'instance-action': 'none',
- 'instance-id': 'i-00000001',
- 'instance-type': 'm1.tiny',
- 'local-hostname': 'sm-foo-test.novalocal',
- 'local-ipv4': None,
- 'placement': {'availability-zone': 'nova'},
- 'public-hostname': 'sm-foo-test.novalocal',
- 'public-ipv4': '',
- 'public-keys': {'0': {'openssh-key': PUBKEY}},
- 'reservation-id': 'r-iru5qm4m',
- 'security-groups': ['default']
-}
-USER_DATA = b'#!/bin/sh\necho This is user data\n'
-OSTACK_META = {
- 'availability_zone': 'nova',
- 'files': [{'content_path': '/content/0000', 'path': '/etc/foo.cfg'},
- {'content_path': '/content/0001', 'path': '/etc/bar/bar.cfg'}],
- 'hostname': 'sm-foo-test.novalocal',
- 'meta': {'dsmode': 'local', 'my-meta': 'my-value'},
- 'name': 'sm-foo-test',
- 'public_keys': {'mykey': PUBKEY},
- 'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c'}
-
-CONTENT_0 = b'This is contents of /etc/foo.cfg\n'
-CONTENT_1 = b'# this is /etc/bar/bar.cfg\n'
-NETWORK_DATA = {
- 'services': [
- {'type': 'dns', 'address': '199.204.44.24'},
- {'type': 'dns', 'address': '199.204.47.54'}
- ],
- 'links': [
- {'vif_id': '2ecc7709-b3f7-4448-9580-e1ec32d75bbd',
- 'ethernet_mac_address': 'fa:16:3e:69:b0:58',
- 'type': 'ovs', 'mtu': None, 'id': 'tap2ecc7709-b3'},
- {'vif_id': '2f88d109-5b57-40e6-af32-2472df09dc33',
- 'ethernet_mac_address': 'fa:16:3e:d4:57:ad',
- 'type': 'ovs', 'mtu': None, 'id': 'tap2f88d109-5b'},
- {'vif_id': '1a5382f8-04c5-4d75-ab98-d666c1ef52cc',
- 'ethernet_mac_address': 'fa:16:3e:05:30:fe',
- 'type': 'ovs', 'mtu': None, 'id': 'tap1a5382f8-04', 'name': 'nic0'}
- ],
- 'networks': [
- {'link': 'tap2ecc7709-b3', 'type': 'ipv4_dhcp',
- 'network_id': '6d6357ac-0f70-4afa-8bd7-c274cc4ea235',
- 'id': 'network0'},
- {'link': 'tap2f88d109-5b', 'type': 'ipv4_dhcp',
- 'network_id': 'd227a9b3-6960-4d94-8976-ee5788b44f54',
- 'id': 'network1'},
- {'link': 'tap1a5382f8-04', 'type': 'ipv4_dhcp',
- 'network_id': 'dab2ba57-cae2-4311-a5ed-010b263891f5',
- 'id': 'network2'}
- ]
-}
-
-NETWORK_DATA_2 = {
- "services": [
- {"type": "dns", "address": "1.1.1.191"},
- {"type": "dns", "address": "1.1.1.4"}],
- "networks": [
- {"network_id": "d94bbe94-7abc-48d4-9c82-4628ea26164a", "type": "ipv4",
- "netmask": "255.255.255.248", "link": "eth0",
- "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0",
- "gateway": "2.2.2.9"}],
- "ip_address": "2.2.2.10", "id": "network0-ipv4"},
- {"network_id": "ca447c83-6409-499b-aaef-6ad1ae995348", "type": "ipv4",
- "netmask": "255.255.255.224", "link": "eth1",
- "routes": [], "ip_address": "3.3.3.24", "id": "network1-ipv4"}],
- "links": [
- {"ethernet_mac_address": "fa:16:3e:dd:50:9a", "mtu": 1500,
- "type": "vif", "id": "eth0", "vif_id": "vif-foo1"},
- {"ethernet_mac_address": "fa:16:3e:a8:14:69", "mtu": 1500,
- "type": "vif", "id": "eth1", "vif_id": "vif-foo2"}]
-}
-
- # This network data has 'tap' or null type for a link.
-NETWORK_DATA_3 = {
- "services": [{"type": "dns", "address": "172.16.36.11"},
- {"type": "dns", "address": "172.16.36.12"}],
- "networks": [
- {"network_id": "7c41450c-ba44-401a-9ab1-1604bb2da51e",
- "type": "ipv4", "netmask": "255.255.255.128",
- "link": "tap77a0dc5b-72", "ip_address": "172.17.48.18",
- "id": "network0",
- "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0",
- "gateway": "172.17.48.1"}]},
- {"network_id": "7c41450c-ba44-401a-9ab1-1604bb2da51e",
- "type": "ipv6", "netmask": "ffff:ffff:ffff:ffff::",
- "link": "tap77a0dc5b-72",
- "ip_address": "fdb8:52d0:9d14:0:f816:3eff:fe9f:70d",
- "id": "network1",
- "routes": [{"netmask": "::", "network": "::",
- "gateway": "fdb8:52d0:9d14::1"}]},
- {"network_id": "1f53cb0e-72d3-47c7-94b9-ff4397c5fe54",
- "type": "ipv4", "netmask": "255.255.255.128",
- "link": "tap7d6b7bec-93", "ip_address": "172.16.48.13",
- "id": "network2",
- "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0",
- "gateway": "172.16.48.1"},
- {"netmask": "255.255.0.0", "network": "172.16.0.0",
- "gateway": "172.16.48.1"}]}],
- "links": [
- {"ethernet_mac_address": "fa:16:3e:dd:50:9a", "mtu": None,
- "type": "tap", "id": "tap77a0dc5b-72",
- "vif_id": "77a0dc5b-720e-41b7-bfa7-1b2ff62e0d48"},
- {"ethernet_mac_address": "fa:16:3e:a8:14:69", "mtu": None,
- "type": None, "id": "tap7d6b7bec-93",
- "vif_id": "7d6b7bec-93e6-4c03-869a-ddc5014892d5"}
- ]
-}
-
-BOND_MAC = "fa:16:3e:b3:72:36"
-NETWORK_DATA_BOND = {
- "services": [
- {"type": "dns", "address": "1.1.1.191"},
- {"type": "dns", "address": "1.1.1.4"},
- ],
- "networks": [
- {"id": "network2-ipv4", "ip_address": "2.2.2.13",
- "link": "vlan2", "netmask": "255.255.255.248",
- "network_id": "4daf5ce8-38cf-4240-9f1a-04e86d7c6117",
- "type": "ipv4",
- "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0",
- "gateway": "2.2.2.9"}]},
- {"id": "network3-ipv4", "ip_address": "10.0.1.5",
- "link": "vlan3", "netmask": "255.255.255.248",
- "network_id": "a9e2f47c-3c43-4782-94d0-e1eeef1c8c9d",
- "type": "ipv4",
- "routes": [{"netmask": "255.255.255.255",
- "network": "192.168.1.0", "gateway": "10.0.1.1"}]}
- ],
- "links": [
- {"ethernet_mac_address": "0c:c4:7a:34:6e:3c",
- "id": "eth0", "mtu": 1500, "type": "phy"},
- {"ethernet_mac_address": "0c:c4:7a:34:6e:3d",
- "id": "eth1", "mtu": 1500, "type": "phy"},
- {"bond_links": ["eth0", "eth1"],
- "bond_miimon": 100, "bond_mode": "4",
- "bond_xmit_hash_policy": "layer3+4",
- "ethernet_mac_address": BOND_MAC,
- "id": "bond0", "type": "bond"},
- {"ethernet_mac_address": "fa:16:3e:b3:72:30",
- "id": "vlan2", "type": "vlan", "vlan_id": 602,
- "vlan_link": "bond0", "vlan_mac_address": "fa:16:3e:b3:72:30"},
- {"ethernet_mac_address": "fa:16:3e:66:ab:a6",
- "id": "vlan3", "type": "vlan", "vlan_id": 612, "vlan_link": "bond0",
- "vlan_mac_address": "fa:16:3e:66:ab:a6"}
- ]
-}
-
-NETWORK_DATA_VLAN = {
- "services": [{"type": "dns", "address": "1.1.1.191"}],
- "networks": [
- {"id": "network1-ipv4", "ip_address": "10.0.1.5",
- "link": "vlan1", "netmask": "255.255.255.248",
- "network_id": "a9e2f47c-3c43-4782-94d0-e1eeef1c8c9d",
- "type": "ipv4",
- "routes": [{"netmask": "255.255.255.255",
- "network": "192.168.1.0", "gateway": "10.0.1.1"}]}
- ],
- "links": [
- {"ethernet_mac_address": "fa:16:3e:69:b0:58",
- "id": "eth0", "mtu": 1500, "type": "phy"},
- {"ethernet_mac_address": "fa:16:3e:b3:72:30",
- "id": "vlan1", "type": "vlan", "vlan_id": 602,
- "vlan_link": "eth0", "vlan_mac_address": "fa:16:3e:b3:72:30"},
- ]
-}
-
-KNOWN_MACS = {
- 'fa:16:3e:69:b0:58': 'enp0s1',
- 'fa:16:3e:d4:57:ad': 'enp0s2',
- 'fa:16:3e:dd:50:9a': 'foo1',
- 'fa:16:3e:a8:14:69': 'foo2',
- 'fa:16:3e:ed:9a:59': 'foo3',
- '0c:c4:7a:34:6e:3d': 'oeth1',
- '0c:c4:7a:34:6e:3c': 'oeth0',
-}
-
-CFG_DRIVE_FILES_V2 = {
- 'ec2/2009-04-04/meta-data.json': json.dumps(EC2_META),
- 'ec2/2009-04-04/user-data': USER_DATA,
- 'ec2/latest/meta-data.json': json.dumps(EC2_META),
- 'ec2/latest/user-data': USER_DATA,
- 'openstack/2012-08-10/meta_data.json': json.dumps(OSTACK_META),
- 'openstack/2012-08-10/user_data': USER_DATA,
- 'openstack/content/0000': CONTENT_0,
- 'openstack/content/0001': CONTENT_1,
- 'openstack/latest/meta_data.json': json.dumps(OSTACK_META),
- 'openstack/latest/user_data': USER_DATA,
- 'openstack/latest/network_data.json': json.dumps(NETWORK_DATA),
- 'openstack/2015-10-15/meta_data.json': json.dumps(OSTACK_META),
- 'openstack/2015-10-15/user_data': USER_DATA,
- 'openstack/2015-10-15/network_data.json': json.dumps(NETWORK_DATA)}
-
-M_PATH = "cloudinit.sources.DataSourceConfigDrive."
-
-
-class TestConfigDriveDataSource(CiTestCase):
-
- def setUp(self):
- super(TestConfigDriveDataSource, self).setUp()
- self.add_patch(
- M_PATH + "util.find_devs_with",
- "m_find_devs_with", return_value=[])
- self.tmp = self.tmp_dir()
-
- def test_ec2_metadata(self):
- populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
- found = ds.read_config_drive(self.tmp)
- self.assertTrue('ec2-metadata' in found)
- ec2_md = found['ec2-metadata']
- self.assertEqual(EC2_META, ec2_md)
-
- def test_dev_os_remap(self):
- populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
- cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
- None,
- helpers.Paths({}))
- found = ds.read_config_drive(self.tmp)
- cfg_ds.metadata = found['metadata']
- name_tests = {
- 'ami': '/dev/vda1',
- 'root': '/dev/vda1',
- 'ephemeral0': '/dev/vda2',
- 'swap': '/dev/vda3',
- }
- for name, dev_name in name_tests.items():
- with ExitStack() as mocks:
- provided_name = dev_name[len('/dev/'):]
- provided_name = "s" + provided_name[1:]
- find_mock = mocks.enter_context(
- mock.patch.object(util, 'find_devs_with',
- return_value=[provided_name]))
- # We want os.path.exists() to return False on its first call,
- # and True on its second call. We use a handy generator as
- # the mock side effect for this. The mocked function returns
- # what the side effect returns.
-
- def exists_side_effect():
- yield False
- yield True
- exists_mock = mocks.enter_context(
- mock.patch.object(os.path, 'exists',
- side_effect=exists_side_effect()))
- self.assertEqual(dev_name, cfg_ds.device_name_to_device(name))
-
- find_mock.assert_called_once_with(mock.ANY)
- self.assertEqual(exists_mock.call_count, 2)
-
- def test_dev_os_map(self):
- populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
- cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
- None,
- helpers.Paths({}))
- found = ds.read_config_drive(self.tmp)
- os_md = found['metadata']
- cfg_ds.metadata = os_md
- name_tests = {
- 'ami': '/dev/vda1',
- 'root': '/dev/vda1',
- 'ephemeral0': '/dev/vda2',
- 'swap': '/dev/vda3',
- }
- for name, dev_name in name_tests.items():
- with ExitStack() as mocks:
- find_mock = mocks.enter_context(
- mock.patch.object(util, 'find_devs_with',
- return_value=[dev_name]))
- exists_mock = mocks.enter_context(
- mock.patch.object(os.path, 'exists',
- return_value=True))
- self.assertEqual(dev_name, cfg_ds.device_name_to_device(name))
-
- find_mock.assert_called_once_with(mock.ANY)
- exists_mock.assert_called_once_with(mock.ANY)
-
- def test_dev_ec2_remap(self):
- populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
- cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
- None,
- helpers.Paths({}))
- found = ds.read_config_drive(self.tmp)
- ec2_md = found['ec2-metadata']
- os_md = found['metadata']
- cfg_ds.ec2_metadata = ec2_md
- cfg_ds.metadata = os_md
- name_tests = {
- 'ami': '/dev/vda1',
- 'root': '/dev/vda1',
- 'ephemeral0': '/dev/vda2',
- 'swap': '/dev/vda3',
- None: None,
- 'bob': None,
- 'root2k': None,
- }
- for name, dev_name in name_tests.items():
- # We want os.path.exists() to return False on its first call,
- # and True on its second call. We use a handy generator as
- # the mock side effect for this. The mocked function returns
- # what the side effect returns.
- def exists_side_effect():
- yield False
- yield True
- with mock.patch.object(os.path, 'exists',
- side_effect=exists_side_effect()):
- self.assertEqual(dev_name, cfg_ds.device_name_to_device(name))
- # We don't assert the call count for os.path.exists() because
- # not all of the entries in name_tests results in two calls to
- # that function. Specifically, 'root2k' doesn't seem to call
- # it at all.
-
- def test_dev_ec2_map(self):
- populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
- cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
- None,
- helpers.Paths({}))
- found = ds.read_config_drive(self.tmp)
- ec2_md = found['ec2-metadata']
- os_md = found['metadata']
- cfg_ds.ec2_metadata = ec2_md
- cfg_ds.metadata = os_md
- name_tests = {
- 'ami': '/dev/sda1',
- 'root': '/dev/sda1',
- 'ephemeral0': '/dev/sda2',
- 'swap': '/dev/sda3',
- None: None,
- 'bob': None,
- 'root2k': None,
- }
- for name, dev_name in name_tests.items():
- with mock.patch.object(os.path, 'exists', return_value=True):
- self.assertEqual(dev_name, cfg_ds.device_name_to_device(name))
-
- def test_dir_valid(self):
- """Verify a dir is read as such."""
-
- populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
-
- found = ds.read_config_drive(self.tmp)
-
- expected_md = copy(OSTACK_META)
- expected_md['instance-id'] = expected_md['uuid']
- expected_md['local-hostname'] = expected_md['hostname']
-
- self.assertEqual(USER_DATA, found['userdata'])
- self.assertEqual(expected_md, found['metadata'])
- self.assertEqual(NETWORK_DATA, found['networkdata'])
- self.assertEqual(found['files']['/etc/foo.cfg'], CONTENT_0)
- self.assertEqual(found['files']['/etc/bar/bar.cfg'], CONTENT_1)
-
- def test_seed_dir_valid_extra(self):
- """Verify extra files do not affect datasource validity."""
-
- data = copy(CFG_DRIVE_FILES_V2)
- data["myfoofile.txt"] = "myfoocontent"
- data["openstack/latest/random-file.txt"] = "random-content"
-
- populate_dir(self.tmp, data)
-
- found = ds.read_config_drive(self.tmp)
-
- expected_md = copy(OSTACK_META)
- expected_md['instance-id'] = expected_md['uuid']
- expected_md['local-hostname'] = expected_md['hostname']
-
- self.assertEqual(expected_md, found['metadata'])
-
- def test_seed_dir_bad_json_metadata(self):
- """Verify that bad json in metadata raises BrokenConfigDriveDir."""
- data = copy(CFG_DRIVE_FILES_V2)
-
- data["openstack/2012-08-10/meta_data.json"] = "non-json garbage {}"
- data["openstack/2015-10-15/meta_data.json"] = "non-json garbage {}"
- data["openstack/latest/meta_data.json"] = "non-json garbage {}"
-
- populate_dir(self.tmp, data)
-
- self.assertRaises(openstack.BrokenMetadata,
- ds.read_config_drive, self.tmp)
-
- def test_seed_dir_no_configdrive(self):
- """Verify that no metadata raises NonConfigDriveDir."""
-
- my_d = os.path.join(self.tmp, "non-configdrive")
- data = copy(CFG_DRIVE_FILES_V2)
- data["myfoofile.txt"] = "myfoocontent"
- data["openstack/latest/random-file.txt"] = "random-content"
- data["content/foo"] = "foocontent"
-
- self.assertRaises(openstack.NonReadable,
- ds.read_config_drive, my_d)
-
- def test_seed_dir_missing(self):
- """Verify that missing seed_dir raises NonConfigDriveDir."""
- my_d = os.path.join(self.tmp, "nonexistantdirectory")
- self.assertRaises(openstack.NonReadable,
- ds.read_config_drive, my_d)
-
- def test_find_candidates(self):
- devs_with_answers = {}
-
- def my_devs_with(*args, **kwargs):
- criteria = args[0] if len(args) else kwargs.pop('criteria', None)
- return devs_with_answers.get(criteria, [])
-
- def my_is_partition(dev):
- return dev[-1] in "0123456789" and not dev.startswith("sr")
-
- try:
- orig_find_devs_with = util.find_devs_with
- util.find_devs_with = my_devs_with
-
- orig_is_partition = util.is_partition
- util.is_partition = my_is_partition
-
- devs_with_answers = {"TYPE=vfat": [],
- "TYPE=iso9660": ["/dev/vdb"],
- "LABEL=config-2": ["/dev/vdb"]}
- self.assertEqual(["/dev/vdb"], ds.find_candidate_devs())
-
- # add a vfat item
- # zdd reverse sorts after vdb, but config-2 label is preferred
- devs_with_answers['TYPE=vfat'] = ["/dev/zdd"]
- self.assertEqual(["/dev/vdb", "/dev/zdd"],
- ds.find_candidate_devs())
-
- # Verify that partitions with the correct label are considered.
- devs_with_answers = {"TYPE=vfat": ["/dev/sda1"],
- "TYPE=iso9660": [],
- "LABEL=config-2": ["/dev/vdb3"]}
- self.assertEqual(["/dev/vdb3"],
- ds.find_candidate_devs())
-
- # Verify that uppercase labels are also found.
- devs_with_answers = {"TYPE=vfat": [],
- "TYPE=iso9660": ["/dev/vdb"],
- "LABEL=CONFIG-2": ["/dev/vdb"]}
- self.assertEqual(["/dev/vdb"], ds.find_candidate_devs())
-
- finally:
- util.find_devs_with = orig_find_devs_with
- util.is_partition = orig_is_partition
-
- @mock.patch(M_PATH + 'on_first_boot')
- def test_pubkeys_v2(self, on_first_boot):
- """Verify that public-keys work in config-drive-v2."""
- myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2)
- self.assertEqual(myds.get_public_ssh_keys(),
- [OSTACK_META['public_keys']['mykey']])
- self.assertEqual('configdrive', myds.cloud_name)
- self.assertEqual('openstack', myds.platform)
- self.assertEqual('seed-dir (%s/seed)' % self.tmp, myds.subplatform)
-
- def test_subplatform_config_drive_when_starts_with_dev(self):
- """subplatform reports config-drive when source starts with /dev/."""
- cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
- None,
- helpers.Paths({}))
- with mock.patch(M_PATH + 'find_candidate_devs') as m_find_devs:
- with mock.patch(M_PATH + 'util.is_FreeBSD', return_value=False):
- with mock.patch(M_PATH + 'util.mount_cb'):
- with mock.patch(M_PATH + 'on_first_boot'):
- m_find_devs.return_value = ['/dev/anything']
- self.assertEqual(True, cfg_ds.get_data())
- self.assertEqual('config-disk (/dev/anything)', cfg_ds.subplatform)
-
-
-class TestNetJson(CiTestCase):
- def setUp(self):
- super(TestNetJson, self).setUp()
- self.tmp = self.tmp_dir()
- self.maxDiff = None
-
- @mock.patch(M_PATH + 'on_first_boot')
- def test_network_data_is_found(self, on_first_boot):
- """Verify that network_data is present in ds in config-drive-v2."""
- myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2)
- self.assertIsNotNone(myds.network_json)
-
- @mock.patch(M_PATH + 'on_first_boot')
- def test_network_config_is_converted(self, on_first_boot):
- """Verify that network_data is converted and present on ds object."""
- myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2)
- network_config = openstack.convert_net_json(NETWORK_DATA,
- known_macs=KNOWN_MACS)
- self.assertEqual(myds.network_config, network_config)
-
- def test_network_config_conversion_dhcp6(self):
- """Test some ipv6 input network json and check the expected
- conversions."""
- in_data = {
- 'links': [
- {'vif_id': '2ecc7709-b3f7-4448-9580-e1ec32d75bbd',
- 'ethernet_mac_address': 'fa:16:3e:69:b0:58',
- 'type': 'ovs', 'mtu': None, 'id': 'tap2ecc7709-b3'},
- {'vif_id': '2f88d109-5b57-40e6-af32-2472df09dc33',
- 'ethernet_mac_address': 'fa:16:3e:d4:57:ad',
- 'type': 'ovs', 'mtu': None, 'id': 'tap2f88d109-5b'},
- ],
- 'networks': [
- {'link': 'tap2ecc7709-b3', 'type': 'ipv6_dhcpv6-stateless',
- 'network_id': '6d6357ac-0f70-4afa-8bd7-c274cc4ea235',
- 'id': 'network0'},
- {'link': 'tap2f88d109-5b', 'type': 'ipv6_dhcpv6-stateful',
- 'network_id': 'd227a9b3-6960-4d94-8976-ee5788b44f54',
- 'id': 'network1'},
- ]
- }
- out_data = {
- 'version': 1,
- 'config': [
- {'mac_address': 'fa:16:3e:69:b0:58',
- 'mtu': None,
- 'name': 'enp0s1',
- 'subnets': [{'type': 'ipv6_dhcpv6-stateless'}],
- 'type': 'physical'},
- {'mac_address': 'fa:16:3e:d4:57:ad',
- 'mtu': None,
- 'name': 'enp0s2',
- 'subnets': [{'type': 'ipv6_dhcpv6-stateful'}],
- 'type': 'physical',
- 'accept-ra': True}
- ],
- }
- conv_data = openstack.convert_net_json(in_data, known_macs=KNOWN_MACS)
- self.assertEqual(out_data, conv_data)
-
- def test_network_config_conversions(self):
- """Tests a bunch of input network json and checks the
- expected conversions."""
- in_datas = [
- NETWORK_DATA,
- {
- 'services': [{'type': 'dns', 'address': '172.19.0.12'}],
- 'networks': [{
- 'network_id': 'dacd568d-5be6-4786-91fe-750c374b78b4',
- 'type': 'ipv4',
- 'netmask': '255.255.252.0',
- 'link': 'tap1a81968a-79',
- 'routes': [{
- 'netmask': '0.0.0.0',
- 'network': '0.0.0.0',
- 'gateway': '172.19.3.254',
- }],
- 'ip_address': '172.19.1.34',
- 'id': 'network0',
- }],
- 'links': [{
- 'type': 'bridge',
- 'vif_id': '1a81968a-797a-400f-8a80-567f997eb93f',
- 'ethernet_mac_address': 'fa:16:3e:ed:9a:59',
- 'id': 'tap1a81968a-79',
- 'mtu': None,
- }],
- },
- ]
- out_datas = [
- {
- 'version': 1,
- 'config': [
- {
- 'subnets': [{'type': 'dhcp4'}],
- 'type': 'physical',
- 'mac_address': 'fa:16:3e:69:b0:58',
- 'name': 'enp0s1',
- 'mtu': None,
- },
- {
- 'subnets': [{'type': 'dhcp4'}],
- 'type': 'physical',
- 'mac_address': 'fa:16:3e:d4:57:ad',
- 'name': 'enp0s2',
- 'mtu': None,
- },
- {
- 'subnets': [{'type': 'dhcp4'}],
- 'type': 'physical',
- 'mac_address': 'fa:16:3e:05:30:fe',
- 'name': 'nic0',
- 'mtu': None,
- },
- {
- 'type': 'nameserver',
- 'address': '199.204.44.24',
- },
- {
- 'type': 'nameserver',
- 'address': '199.204.47.54',
- }
- ],
-
- },
- {
- 'version': 1,
- 'config': [
- {
- 'name': 'foo3',
- 'mac_address': 'fa:16:3e:ed:9a:59',
- 'mtu': None,
- 'type': 'physical',
- 'subnets': [
- {
- 'address': '172.19.1.34',
- 'netmask': '255.255.252.0',
- 'type': 'static',
- 'ipv4': True,
- 'routes': [{
- 'gateway': '172.19.3.254',
- 'netmask': '0.0.0.0',
- 'network': '0.0.0.0',
- }],
- }
- ]
- },
- {
- 'type': 'nameserver',
- 'address': '172.19.0.12',
- }
- ],
- },
- ]
- for in_data, out_data in zip(in_datas, out_datas):
- conv_data = openstack.convert_net_json(in_data,
- known_macs=KNOWN_MACS)
- self.assertEqual(out_data, conv_data)
-
-
-class TestConvertNetworkData(CiTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestConvertNetworkData, self).setUp()
- self.tmp = self.tmp_dir()
-
- def _getnames_in_config(self, ncfg):
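- """Return the set of interface names of 'physical' entries in ncfg."""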
- return set([n['name'] for n in ncfg['config']
- if n['type'] == 'physical'])
-
- def test_conversion_fills_names(self):
- ncfg = openstack.convert_net_json(NETWORK_DATA, known_macs=KNOWN_MACS)
- expected = set(['nic0', 'enp0s1', 'enp0s2'])
- found = self._getnames_in_config(ncfg)
- self.assertEqual(found, expected)
-
- @mock.patch('cloudinit.net.get_interfaces_by_mac')
- def test_convert_reads_system_prefers_name(self, get_interfaces_by_mac):
- macs = KNOWN_MACS.copy()
- macs.update({'fa:16:3e:05:30:fe': 'foonic1',
- 'fa:16:3e:69:b0:58': 'ens1'})
- get_interfaces_by_mac.return_value = macs
-
- ncfg = openstack.convert_net_json(NETWORK_DATA)
- expected = set(['nic0', 'ens1', 'enp0s2'])
- found = self._getnames_in_config(ncfg)
- self.assertEqual(found, expected)
-
- def test_convert_raises_value_error_on_missing_name(self):
- macs = {'aa:aa:aa:aa:aa:00': 'ens1'}
- self.assertRaises(ValueError, openstack.convert_net_json,
- NETWORK_DATA, known_macs=macs)
-
- def test_conversion_with_route(self):
- ncfg = openstack.convert_net_json(NETWORK_DATA_2,
- known_macs=KNOWN_MACS)
- # Not an exhaustive test, but check that a route appears in the
- # network config and that it gets rendered to an ENI file.
- routes = []
- for n in ncfg['config']:
- for s in n.get('subnets', []):
- routes.extend(s.get('routes', []))
- self.assertIn(
- {'network': '0.0.0.0', 'netmask': '0.0.0.0', 'gateway': '2.2.2.9'},
- routes)
- eni_renderer = eni.Renderer()
- eni_renderer.render_network_state(
- network_state.parse_net_config_data(ncfg), target=self.tmp)
- with open(os.path.join(self.tmp, "etc",
- "network", "interfaces"), 'r') as f:
- eni_rendering = f.read()
- self.assertIn("route add default gw 2.2.2.9", eni_rendering)
-
- def test_conversion_with_tap(self):
- ncfg = openstack.convert_net_json(NETWORK_DATA_3,
- known_macs=KNOWN_MACS)
- physicals = set()
- for i in ncfg['config']:
- if i.get('type') == "physical":
- physicals.add(i['name'])
- self.assertEqual(physicals, set(('foo1', 'foo2')))
-
- def test_bond_conversion(self):
- # light testing of bond conversion and eni rendering of bond
- ncfg = openstack.convert_net_json(NETWORK_DATA_BOND,
- known_macs=KNOWN_MACS)
- eni_renderer = eni.Renderer()
-
- eni_renderer.render_network_state(
- network_state.parse_net_config_data(ncfg), target=self.tmp)
- with open(os.path.join(self.tmp, "etc",
- "network", "interfaces"), 'r') as f:
- eni_rendering = f.read()
-
- # Verify there are expected interfaces in the net config.
- interfaces = sorted(
- [i['name'] for i in ncfg['config']
- if i['type'] in ('vlan', 'bond', 'physical')])
- self.assertEqual(
- sorted(["oeth0", "oeth1", "bond0", "bond0.602", "bond0.612"]),
- interfaces)
-
- words = eni_rendering.split()
- # 'eth0' and 'eth1' are the ids. Because their mac addresses
- # map to other names, we should not see them in the ENI.
- self.assertNotIn('eth0', words)
- self.assertNotIn('eth1', words)
-
- # oeth0 and oeth1 are the interface names for eni.
- # bond0 will be generated for the bond. Each should be auto.
- self.assertIn("auto oeth0", eni_rendering)
- self.assertIn("auto oeth1", eni_rendering)
- self.assertIn("auto bond0", eni_rendering)
- # The bond should have the given mac address
- pos = eni_rendering.find("auto bond0")
- self.assertIn(BOND_MAC, eni_rendering[pos:])
-
- def test_vlan(self):
- # light testing of vlan config conversion and eni rendering
- ncfg = openstack.convert_net_json(NETWORK_DATA_VLAN,
- known_macs=KNOWN_MACS)
- eni_renderer = eni.Renderer()
- eni_renderer.render_network_state(
- network_state.parse_net_config_data(ncfg), target=self.tmp)
- with open(os.path.join(self.tmp, "etc",
- "network", "interfaces"), 'r') as f:
- eni_rendering = f.read()
-
- self.assertIn("iface enp0s1", eni_rendering)
- self.assertIn("address 10.0.1.5", eni_rendering)
- self.assertIn("auto enp0s1.602", eni_rendering)
-
- def test_mac_addrs_can_be_upper_case(self):
- # input mac addresses on rackspace may be upper case
- my_netdata = deepcopy(NETWORK_DATA)
- for link in my_netdata['links']:
- link['ethernet_mac_address'] = link['ethernet_mac_address'].upper()
-
- ncfg = openstack.convert_net_json(my_netdata, known_macs=KNOWN_MACS)
- config_name2mac = {}
- for n in ncfg['config']:
- if n['type'] == 'physical':
- config_name2mac[n['name']] = n['mac_address']
-
- expected = {'nic0': 'fa:16:3e:05:30:fe', 'enp0s1': 'fa:16:3e:69:b0:58',
- 'enp0s2': 'fa:16:3e:d4:57:ad'}
- self.assertEqual(expected, config_name2mac)
-
- def test_unknown_device_types_accepted(self):
- # If we don't recognise a link, we should treat it as physical for a
- # best-effort boot
- my_netdata = deepcopy(NETWORK_DATA)
- my_netdata['links'][0]['type'] = 'my-special-link-type'
-
- ncfg = openstack.convert_net_json(my_netdata, known_macs=KNOWN_MACS)
- config_name2mac = {}
- for n in ncfg['config']:
- if n['type'] == 'physical':
- config_name2mac[n['name']] = n['mac_address']
-
- expected = {'nic0': 'fa:16:3e:05:30:fe', 'enp0s1': 'fa:16:3e:69:b0:58',
- 'enp0s2': 'fa:16:3e:d4:57:ad'}
- self.assertEqual(expected, config_name2mac)
-
- # We should, however, warn the user that we don't recognise the type
- self.assertIn('Unknown network_data link type (my-special-link-type)',
- self.logs.getvalue())
-
-
-def cfg_ds_from_dir(base_d, files=None):
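- """Build a DataSourceConfigDrive seeded from base_d/seed and run get_data()."""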
- run = os.path.join(base_d, "run")
- os.mkdir(run)
- cfg_ds = ds.DataSourceConfigDrive(
- settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': run}))
- cfg_ds.seed_dir = os.path.join(base_d, "seed")
- if files:
- populate_dir(cfg_ds.seed_dir, files)
- cfg_ds.known_macs = KNOWN_MACS.copy()
- if not cfg_ds.get_data():
- raise RuntimeError("Data source did not extract itself from"
- " seed directory %s" % cfg_ds.seed_dir)
- return cfg_ds
-
-
-def populate_ds_from_read_config(cfg_ds, source, results):
- """Patch the DataSourceConfigDrive from the results of
- read_config_drive_dir hopefully in line with what it would have
- if cfg_ds.get_data had been successfully called"""
- cfg_ds.source = source
- cfg_ds.metadata = results.get('metadata')
- cfg_ds.ec2_metadata = results.get('ec2-metadata')
- cfg_ds.userdata_raw = results.get('userdata')
- cfg_ds.version = results.get('version')
- cfg_ds.network_json = results.get('networkdata')
- cfg_ds._network_config = openstack.convert_net_json(
- cfg_ds.network_json, known_macs=KNOWN_MACS)
-
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_digitalocean.py b/tests/unittests/test_datasource/test_digitalocean.py
deleted file mode 100644
index 3127014b..00000000
--- a/tests/unittests/test_datasource/test_digitalocean.py
+++ /dev/null
@@ -1,372 +0,0 @@
-# Copyright (C) 2014 Neal Shrader
-#
-# Author: Neal Shrader <neal@digitalocean.com>
-# Author: Ben Howard <bh@digitalocean.com>
-# Author: Scott Moser <smoser@ubuntu.com>
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import json
-
-from cloudinit import helpers
-from cloudinit import settings
-from cloudinit.sources import DataSourceDigitalOcean
-from cloudinit.sources.helpers import digitalocean
-
-from cloudinit.tests.helpers import mock, CiTestCase
-
-DO_MULTIPLE_KEYS = ["ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@do.co",
- "ssh-rsa AAAAB3NzaC1yc2EAAAA... test2@do.co"]
-DO_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... test@do.co"
-
- # The following JSON was taken from a droplet (that's why it's a string).
-DO_META = json.loads("""
-{
- "droplet_id": "22532410",
- "hostname": "utl-96268",
- "vendor_data": "vendordata goes here",
- "user_data": "userdata goes here",
- "public_keys": "",
- "auth_key": "authorization_key",
- "region": "nyc3",
- "interfaces": {
- "private": [
- {
- "ipv4": {
- "ip_address": "10.132.6.205",
- "netmask": "255.255.0.0",
- "gateway": "10.132.0.1"
- },
- "mac": "04:01:57:d1:9e:02",
- "type": "private"
- }
- ],
- "public": [
- {
- "ipv4": {
- "ip_address": "192.0.0.20",
- "netmask": "255.255.255.0",
- "gateway": "104.236.0.1"
- },
- "ipv6": {
- "ip_address": "2604:A880:0800:0000:1000:0000:0000:0000",
- "cidr": 64,
- "gateway": "2604:A880:0800:0000:0000:0000:0000:0001"
- },
- "anchor_ipv4": {
- "ip_address": "10.0.0.5",
- "netmask": "255.255.0.0",
- "gateway": "10.0.0.1"
- },
- "mac": "04:01:57:d1:9e:01",
- "type": "public"
- }
- ]
- },
- "floating_ip": {
- "ipv4": {
- "active": false
- }
- },
- "dns": {
- "nameservers": [
- "2001:4860:4860::8844",
- "2001:4860:4860::8888",
- "8.8.8.8"
- ]
- }
-}
-""")
-
-# This has no private interface
-DO_META_2 = {
- "droplet_id": 27223699,
- "hostname": "smtest1",
- "vendor_data": "\n".join([
- ('"Content-Type: multipart/mixed; '
- 'boundary=\"===============8645434374073493512==\"'),
- 'MIME-Version: 1.0',
- '',
- '--===============8645434374073493512==',
- 'MIME-Version: 1.0'
- 'Content-Type: text/cloud-config; charset="us-ascii"'
- 'Content-Transfer-Encoding: 7bit'
- 'Content-Disposition: attachment; filename="cloud-config"'
- '',
- '#cloud-config',
- 'disable_root: false',
- 'manage_etc_hosts: true',
- '',
- '',
- '--===============8645434374073493512=='
- ]),
- "public_keys": [
- "ssh-rsa AAAAB3NzaN...N3NtHw== smoser@brickies"
- ],
- "auth_key": "88888888888888888888888888888888",
- "region": "nyc3",
- "interfaces": {
- "public": [{
- "ipv4": {
- "ip_address": "45.55.249.133",
- "netmask": "255.255.192.0",
- "gateway": "45.55.192.1"
- },
- "anchor_ipv4": {
- "ip_address": "10.17.0.5",
- "netmask": "255.255.0.0",
- "gateway": "10.17.0.1"
- },
- "mac": "ae:cc:08:7c:88:00",
- "type": "public"
- }]
- },
- "floating_ip": {"ipv4": {"active": True, "ip_address": "138.197.59.92"}},
- "dns": {"nameservers": ["8.8.8.8", "8.8.4.4"]},
- "tags": None,
-}
-
-DO_META['public_keys'] = DO_SINGLE_KEY
-
-MD_URL = 'http://169.254.169.254/metadata/v1.json'
-
-
-def _mock_dmi():
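- """Mimic a successful sysinfo check: (on_digitalocean, droplet_id)."""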
- return (True, DO_META.get('id'))
-
-
-class TestDataSourceDigitalOcean(CiTestCase):
- """
- Test reading the meta-data
- """
- def setUp(self):
- super(TestDataSourceDigitalOcean, self).setUp()
- self.tmp = self.tmp_dir()
-
- def get_ds(self, get_sysinfo=_mock_dmi):
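- """Build a DataSourceDigitalOcean with the sysinfo check stubbed out."""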
- ds = DataSourceDigitalOcean.DataSourceDigitalOcean(
- settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
- ds.use_ip4LL = False
- if get_sysinfo is not None:
- ds._get_sysinfo = get_sysinfo
- return ds
-
- @mock.patch('cloudinit.sources.helpers.digitalocean.read_sysinfo')
- def test_returns_false_not_on_docean(self, m_read_sysinfo):
- m_read_sysinfo.return_value = (False, None)
- ds = self.get_ds(get_sysinfo=None)
- self.assertEqual(False, ds.get_data())
- self.assertTrue(m_read_sysinfo.called)
-
- @mock.patch('cloudinit.sources.helpers.digitalocean.read_metadata')
- def test_metadata(self, mock_readmd):
- mock_readmd.return_value = DO_META.copy()
-
- ds = self.get_ds()
- ret = ds.get_data()
- self.assertTrue(ret)
-
- self.assertTrue(mock_readmd.called)
-
- self.assertEqual(DO_META.get('user_data'), ds.get_userdata_raw())
- self.assertEqual(DO_META.get('vendor_data'), ds.get_vendordata_raw())
- self.assertEqual(DO_META.get('region'), ds.availability_zone)
- self.assertEqual(DO_META.get('droplet_id'), ds.get_instance_id())
- self.assertEqual(DO_META.get('hostname'), ds.get_hostname())
-
- # Single key
- self.assertEqual([DO_META.get('public_keys')],
- ds.get_public_ssh_keys())
-
- self.assertIsInstance(ds.get_public_ssh_keys(), list)
-
- @mock.patch('cloudinit.sources.helpers.digitalocean.read_metadata')
- def test_multiple_ssh_keys(self, mock_readmd):
- metadata = DO_META.copy()
- metadata['public_keys'] = DO_MULTIPLE_KEYS
- mock_readmd.return_value = metadata.copy()
-
- ds = self.get_ds()
- ret = ds.get_data()
- self.assertTrue(ret)
-
- self.assertTrue(mock_readmd.called)
-
- # Multiple keys
- self.assertEqual(metadata['public_keys'], ds.get_public_ssh_keys())
- self.assertIsInstance(ds.get_public_ssh_keys(), list)
-
-
-class TestNetworkConvert(CiTestCase):
-
- def _get_networking(self):
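- """Convert DO_META interfaces to a network config with MAC lookups mocked."""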
- self.m_get_by_mac.return_value = {
- '04:01:57:d1:9e:01': 'ens1',
- '04:01:57:d1:9e:02': 'ens2',
- 'b8:ae:ed:75:5f:9a': 'enp0s25',
- 'ae:cc:08:7c:88:00': 'meta2p1'}
- netcfg = digitalocean.convert_network_configuration(
- DO_META['interfaces'], DO_META['dns']['nameservers'])
- self.assertIn('config', netcfg)
- return netcfg
-
- def setUp(self):
- super(TestNetworkConvert, self).setUp()
- self.add_patch('cloudinit.net.get_interfaces_by_mac', 'm_get_by_mac')
-
- def test_networking_defined(self):
- netcfg = self._get_networking()
- self.assertIsNotNone(netcfg)
- dns_defined = False
-
- for part in netcfg.get('config'):
- n_type = part.get('type')
- print("testing part ", n_type, "\n", json.dumps(part, indent=3))
-
- if n_type == 'nameserver':
- n_address = part.get('address')
- self.assertIsNotNone(n_address)
- self.assertEqual(len(n_address), 3)
-
- dns_resolvers = DO_META["dns"]["nameservers"]
- for x in n_address:
- self.assertIn(x, dns_resolvers)
- dns_defined = True
-
- else:
- n_subnets = part.get('subnets')
- n_name = part.get('name')
- n_mac = part.get('mac_address')
-
- self.assertIsNotNone(n_type)
- self.assertIsNotNone(n_subnets)
- self.assertIsNotNone(n_name)
- self.assertIsNotNone(n_mac)
-
- self.assertTrue(dns_defined)
-
- def _get_nic_definition(self, int_type, expected_name):
- """helper function to return if_type (i.e. public) and the expected
- name used by cloud-init (i.e eth0)"""
- netcfg = self._get_networking()
- meta_def = (DO_META.get('interfaces')).get(int_type)[0]
-
- self.assertEqual(int_type, meta_def.get('type'))
-
- for nic_def in netcfg.get('config'):
- print(nic_def)
- if nic_def.get('name') == expected_name:
- return nic_def, meta_def
-
- def _get_match_subn(self, subnets, ip_addr):
- """get the matching subnet definition based on ip address"""
- for subn in subnets:
- address = subn.get('address')
- self.assertIsNotNone(address)
-
- # An equality check won't work because ipv6 addresses are in
- # cidr notation, e.g. fe00::1/64.
- if ip_addr in address:
- print(json.dumps(subn, indent=3))
- return subn
-
- def test_correct_gateways_defined(self):
- """test to make sure the eth0 ipv4 and ipv6 gateways are defined"""
- netcfg = self._get_networking()
- gateways = []
- for nic_def in netcfg.get('config'):
- if nic_def.get('type') != 'physical':
- continue
- for subn in nic_def.get('subnets'):
- if 'gateway' in subn:
- gateways.append(subn.get('gateway'))
-
- # we should have two gateways, one ipv4 and one ipv6
- self.assertEqual(len(gateways), 2)
-
- # make sure the ipv4 gateway is there
- (nic_def, meta_def) = self._get_nic_definition('public', 'eth0')
- ipv4_def = meta_def.get('ipv4')
- self.assertIn(ipv4_def.get('gateway'), gateways)
-
- # make sure the ipv6 gateway is there
- ipv6_def = meta_def.get('ipv6')
- self.assertIn(ipv6_def.get('gateway'), gateways)
-
- def test_public_interface_defined(self):
- """test that the public interface is defined as eth0"""
- (nic_def, meta_def) = self._get_nic_definition('public', 'eth0')
- self.assertEqual('eth0', nic_def.get('name'))
- self.assertEqual(meta_def.get('mac'), nic_def.get('mac_address'))
- self.assertEqual('physical', nic_def.get('type'))
-
- def test_private_interface_defined(self):
- """test that the private interface is defined as eth1"""
- (nic_def, meta_def) = self._get_nic_definition('private', 'eth1')
- self.assertEqual('eth1', nic_def.get('name'))
- self.assertEqual(meta_def.get('mac'), nic_def.get('mac_address'))
- self.assertEqual('physical', nic_def.get('type'))
-
- def test_public_interface_ipv6(self):
- """test public ipv6 addressing"""
- (nic_def, meta_def) = self._get_nic_definition('public', 'eth0')
- ipv6_def = meta_def.get('ipv6')
- self.assertIsNotNone(ipv6_def)
-
- subn_def = self._get_match_subn(nic_def.get('subnets'),
- ipv6_def.get('ip_address'))
-
- cidr_notated_address = "{0}/{1}".format(ipv6_def.get('ip_address'),
- ipv6_def.get('cidr'))
-
- self.assertEqual(cidr_notated_address, subn_def.get('address'))
- self.assertEqual(ipv6_def.get('gateway'), subn_def.get('gateway'))
-
- def test_public_interface_ipv4(self):
- """test public ipv4 addressing"""
- (nic_def, meta_def) = self._get_nic_definition('public', 'eth0')
- ipv4_def = meta_def.get('ipv4')
- self.assertIsNotNone(ipv4_def)
-
- subn_def = self._get_match_subn(nic_def.get('subnets'),
- ipv4_def.get('ip_address'))
-
- self.assertEqual(ipv4_def.get('netmask'), subn_def.get('netmask'))
- self.assertEqual(ipv4_def.get('gateway'), subn_def.get('gateway'))
-
- def test_public_interface_anchor_ipv4(self):
- """test public ipv4 addressing"""
- (nic_def, meta_def) = self._get_nic_definition('public', 'eth0')
- ipv4_def = meta_def.get('anchor_ipv4')
- self.assertIsNotNone(ipv4_def)
-
- subn_def = self._get_match_subn(nic_def.get('subnets'),
- ipv4_def.get('ip_address'))
-
- self.assertEqual(ipv4_def.get('netmask'), subn_def.get('netmask'))
- self.assertNotIn('gateway', subn_def)
-
- @mock.patch('cloudinit.net.get_interfaces_by_mac')
- def test_convert_without_private(self, m_get_by_mac):
- m_get_by_mac.return_value = {
- 'b8:ae:ed:75:5f:9a': 'enp0s25',
- 'ae:cc:08:7c:88:00': 'meta2p1'}
- netcfg = digitalocean.convert_network_configuration(
- DO_META_2['interfaces'], DO_META_2['dns']['nameservers'])
-
- # print(netcfg)
- byname = {}
- for i in netcfg['config']:
- if 'name' in i:
- if i['name'] in byname:
- raise ValueError("name '%s' in config twice: %s" %
- (i['name'], netcfg))
- byname[i['name']] = i
- self.assertTrue('eth0' in byname)
- self.assertTrue('subnets' in byname['eth0'])
- eth0 = byname['eth0']
- self.assertEqual(
- sorted(['45.55.249.133', '10.17.0.5']),
- sorted([i['address'] for i in eth0['subnets']]))
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_exoscale.py b/tests/unittests/test_datasource/test_exoscale.py
deleted file mode 100644
index f0061199..00000000
--- a/tests/unittests/test_datasource/test_exoscale.py
+++ /dev/null
@@ -1,211 +0,0 @@
-# Author: Mathieu Corbin <mathieu.corbin@exoscale.com>
-# Author: Christopher Glass <christopher.glass@exoscale.com>
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import helpers
-from cloudinit.sources.DataSourceExoscale import (
- API_VERSION,
- DataSourceExoscale,
- METADATA_URL,
- get_password,
- PASSWORD_SERVER_PORT,
- read_metadata)
-from cloudinit.tests.helpers import HttprettyTestCase, mock
-from cloudinit import util
-
-import httpretty
-import os
-import requests
-
-
-TEST_PASSWORD_URL = "{}:{}/{}/".format(METADATA_URL,
- PASSWORD_SERVER_PORT,
- API_VERSION)
-
-TEST_METADATA_URL = "{}/{}/meta-data/".format(METADATA_URL,
- API_VERSION)
-
-TEST_USERDATA_URL = "{}/{}/user-data".format(METADATA_URL,
- API_VERSION)
-
-
-@httpretty.activate
-class TestDatasourceExoscale(HttprettyTestCase):
-
- def setUp(self):
- super(TestDatasourceExoscale, self).setUp()
- self.tmp = self.tmp_dir()
- self.password_url = TEST_PASSWORD_URL
- self.metadata_url = TEST_METADATA_URL
- self.userdata_url = TEST_USERDATA_URL
-
- def test_password_saved(self):
- """The password is not set when it is not found
- in the metadata service."""
- httpretty.register_uri(httpretty.GET,
- self.password_url,
- body="saved_password")
- self.assertFalse(get_password())
-
- def test_password_empty(self):
- """No password is set if the metadata service returns
- an empty string."""
- httpretty.register_uri(httpretty.GET,
- self.password_url,
- body="")
- self.assertFalse(get_password())
-
- def test_password(self):
- """The password is set to what is found in the metadata
- service."""
- expected_password = "p@ssw0rd"
- httpretty.register_uri(httpretty.GET,
- self.password_url,
- body=expected_password)
- password = get_password()
- self.assertEqual(expected_password, password)
-
- def test_activate_removes_set_passwords_semaphore(self):
- """Allow set_passwords to run every boot by removing the semaphore."""
- path = helpers.Paths({'cloud_dir': self.tmp})
- sem_dir = self.tmp_path('instance/sem', dir=self.tmp)
- util.ensure_dir(sem_dir)
- sem_file = os.path.join(sem_dir, 'config_set_passwords')
- with open(sem_file, 'w') as stream:
- stream.write('')
- ds = DataSourceExoscale({}, None, path)
- ds.activate(None, None)
- self.assertFalse(os.path.exists(sem_file))
-
- def test_get_data(self):
- """The datasource conforms to expected behavior when supplied
- full test data."""
- path = helpers.Paths({'run_dir': self.tmp})
- ds = DataSourceExoscale({}, None, path)
- ds._is_platform_viable = lambda: True
- expected_password = "p@ssw0rd"
- expected_id = "12345"
- expected_hostname = "myname"
- expected_userdata = "#cloud-config"
- httpretty.register_uri(httpretty.GET,
- self.userdata_url,
- body=expected_userdata)
- httpretty.register_uri(httpretty.GET,
- self.password_url,
- body=expected_password)
- httpretty.register_uri(httpretty.GET,
- self.metadata_url,
- body="instance-id\nlocal-hostname")
- httpretty.register_uri(httpretty.GET,
- "{}local-hostname".format(self.metadata_url),
- body=expected_hostname)
- httpretty.register_uri(httpretty.GET,
- "{}instance-id".format(self.metadata_url),
- body=expected_id)
- self.assertTrue(ds._get_data())
- self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config")
- self.assertEqual(ds.metadata, {"instance-id": expected_id,
- "local-hostname": expected_hostname})
- self.assertEqual(ds.get_config_obj(),
- {'ssh_pwauth': True,
- 'password': expected_password,
- 'chpasswd': {
- 'expire': False,
- }})
-
- def test_get_data_saved_password(self):
- """The datasource conforms to expected behavior when saved_password is
- returned by the password server."""
- path = helpers.Paths({'run_dir': self.tmp})
- ds = DataSourceExoscale({}, None, path)
- ds._is_platform_viable = lambda: True
- expected_answer = "saved_password"
- expected_id = "12345"
- expected_hostname = "myname"
- expected_userdata = "#cloud-config"
- httpretty.register_uri(httpretty.GET,
- self.userdata_url,
- body=expected_userdata)
- httpretty.register_uri(httpretty.GET,
- self.password_url,
- body=expected_answer)
- httpretty.register_uri(httpretty.GET,
- self.metadata_url,
- body="instance-id\nlocal-hostname")
- httpretty.register_uri(httpretty.GET,
- "{}local-hostname".format(self.metadata_url),
- body=expected_hostname)
- httpretty.register_uri(httpretty.GET,
- "{}instance-id".format(self.metadata_url),
- body=expected_id)
- self.assertTrue(ds._get_data())
- self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config")
- self.assertEqual(ds.metadata, {"instance-id": expected_id,
- "local-hostname": expected_hostname})
- self.assertEqual(ds.get_config_obj(), {})
-
- def test_get_data_no_password(self):
- """The datasource conforms to expected behavior when no password is
- returned by the password server."""
- path = helpers.Paths({'run_dir': self.tmp})
- ds = DataSourceExoscale({}, None, path)
- ds._is_platform_viable = lambda: True
- expected_answer = ""
- expected_id = "12345"
- expected_hostname = "myname"
- expected_userdata = "#cloud-config"
- httpretty.register_uri(httpretty.GET,
- self.userdata_url,
- body=expected_userdata)
- httpretty.register_uri(httpretty.GET,
- self.password_url,
- body=expected_answer)
- httpretty.register_uri(httpretty.GET,
- self.metadata_url,
- body="instance-id\nlocal-hostname")
- httpretty.register_uri(httpretty.GET,
- "{}local-hostname".format(self.metadata_url),
- body=expected_hostname)
- httpretty.register_uri(httpretty.GET,
- "{}instance-id".format(self.metadata_url),
- body=expected_id)
- self.assertTrue(ds._get_data())
- self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config")
- self.assertEqual(ds.metadata, {"instance-id": expected_id,
- "local-hostname": expected_hostname})
- self.assertEqual(ds.get_config_obj(), {})
-
- @mock.patch('cloudinit.sources.DataSourceExoscale.get_password')
- def test_read_metadata_when_password_server_unreachable(self, m_password):
- """The read_metadata function returns partial results in case the
- password server (only) is unreachable."""
- expected_id = "12345"
- expected_hostname = "myname"
- expected_userdata = "#cloud-config"
-
- m_password.side_effect = requests.Timeout('Fake Connection Timeout')
- httpretty.register_uri(httpretty.GET,
- self.userdata_url,
- body=expected_userdata)
- httpretty.register_uri(httpretty.GET,
- self.metadata_url,
- body="instance-id\nlocal-hostname")
- httpretty.register_uri(httpretty.GET,
- "{}local-hostname".format(self.metadata_url),
- body=expected_hostname)
- httpretty.register_uri(httpretty.GET,
- "{}instance-id".format(self.metadata_url),
- body=expected_id)
-
- result = read_metadata()
-
- self.assertIsNone(result.get("password"))
- self.assertEqual(result.get("user-data").decode("utf-8"),
- expected_userdata)
-
- def test_non_viable_platform(self):
- """The datasource fails fast when the platform is not viable."""
- path = helpers.Paths({'run_dir': self.tmp})
- ds = DataSourceExoscale({}, None, path)
- ds._is_platform_viable = lambda: False
- self.assertFalse(ds._get_data())
diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py
deleted file mode 100644
index 01f4cbd1..00000000
--- a/tests/unittests/test_datasource/test_gce.py
+++ /dev/null
@@ -1,363 +0,0 @@
-# Copyright (C) 2014 Vaidas Jablonskis
-#
-# Author: Vaidas Jablonskis <jablonskis@gmail.com>
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import datetime
-import httpretty
-import json
-import re
-from unittest import mock
-from urllib.parse import urlparse
-
-from base64 import b64encode, b64decode
-
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import settings
-from cloudinit.sources import DataSourceGCE
-
-from cloudinit.tests import helpers as test_helpers
-
-
-GCE_META = {
- 'instance/id': '123',
- 'instance/zone': 'foo/bar',
- 'instance/hostname': 'server.project-foo.local',
-}
-
-GCE_META_PARTIAL = {
- 'instance/id': '1234',
- 'instance/hostname': 'server.project-bar.local',
- 'instance/zone': 'bar/baz',
-}
-
-GCE_META_ENCODING = {
- 'instance/id': '12345',
- 'instance/hostname': 'server.project-baz.local',
- 'instance/zone': 'baz/bang',
- 'instance/attributes': {
- 'user-data': b64encode(b'#!/bin/echo baz\n').decode('utf-8'),
- 'user-data-encoding': 'base64',
- }
-}
-
-GCE_USER_DATA_TEXT = {
- 'instance/id': '12345',
- 'instance/hostname': 'server.project-baz.local',
- 'instance/zone': 'baz/bang',
- 'instance/attributes': {
- 'user-data': '#!/bin/sh\necho hi mom\ntouch /run/up-now\n',
- }
-}
-
-HEADERS = {'Metadata-Flavor': 'Google'}
-MD_URL_RE = re.compile(
- r'http://metadata.google.internal/computeMetadata/v1/.*')
-GUEST_ATTRIBUTES_URL = ('http://metadata.google.internal/computeMetadata/'
- 'v1/instance/guest-attributes/hostkeys/')
-
-
-def _set_mock_metadata(gce_meta=None):
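- """Register an httpretty callback serving gce_meta at the metadata URL."""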
- if gce_meta is None:
- gce_meta = GCE_META
-
- def _request_callback(method, uri, headers):
- url_path = urlparse(uri).path
- if url_path.startswith('/computeMetadata/v1/'):
- path = url_path.split('/computeMetadata/v1/')[1:][0]
- recursive = path.endswith('/')
- path = path.rstrip('/')
- else:
- path = None
- if path in gce_meta:
- response = gce_meta.get(path)
- if recursive:
- response = json.dumps(response)
- return (200, headers, response)
- else:
- return (404, headers, '')
-
- # reset is needed. https://github.com/gabrielfalcao/HTTPretty/issues/316
- httpretty.register_uri(httpretty.GET, MD_URL_RE, body=_request_callback)
-
-
-@httpretty.activate
-class TestDataSourceGCE(test_helpers.HttprettyTestCase):
-
- def _make_distro(self, dtype, def_user=None):
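- """Build a distro object of the given type, optionally with a default user."""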
- cfg = dict(settings.CFG_BUILTIN)
- cfg['system_info']['distro'] = dtype
- paths = helpers.Paths(cfg['system_info']['paths'])
- distro_cls = distros.fetch(dtype)
- if def_user:
- cfg['system_info']['default_user'] = def_user.copy()
- distro = distro_cls(dtype, cfg['system_info'], paths)
- return distro
-
- def setUp(self):
- tmp = self.tmp_dir()
- self.ds = DataSourceGCE.DataSourceGCE(
- settings.CFG_BUILTIN, None,
- helpers.Paths({'run_dir': tmp}))
- ppatch = mock.patch(
- 'cloudinit.sources.DataSourceGCE.platform_reports_gce')
- self.m_platform_reports_gce = ppatch.start()
- self.m_platform_reports_gce.return_value = True
- self.addCleanup(ppatch.stop)
- super(TestDataSourceGCE, self).setUp()
-
- def test_connection(self):
- _set_mock_metadata()
- success = self.ds.get_data()
- self.assertTrue(success)
-
- req_header = httpretty.last_request().headers
- for header_name, expected_value in HEADERS.items():
- self.assertEqual(expected_value, req_header.get(header_name))
-
- def test_metadata(self):
- # UnicodeDecodeError if set to ds.userdata instead of userdata_raw
- meta = GCE_META.copy()
- meta['instance/attributes/user-data'] = b'/bin/echo \xff\n'
-
- _set_mock_metadata()
- self.ds.get_data()
-
- shostname = GCE_META.get('instance/hostname').split('.')[0]
- self.assertEqual(shostname,
- self.ds.get_hostname())
-
- self.assertEqual(GCE_META.get('instance/id'),
- self.ds.get_instance_id())
-
- self.assertEqual(GCE_META.get('instance/attributes/user-data'),
- self.ds.get_userdata_raw())
-
- # test partial metadata (missing user-data in particular)
- def test_metadata_partial(self):
- _set_mock_metadata(GCE_META_PARTIAL)
- self.ds.get_data()
-
- self.assertEqual(GCE_META_PARTIAL.get('instance/id'),
- self.ds.get_instance_id())
-
- shostname = GCE_META_PARTIAL.get('instance/hostname').split('.')[0]
- self.assertEqual(shostname, self.ds.get_hostname())
-
- def test_userdata_no_encoding(self):
- """check that user-data is read."""
- _set_mock_metadata(GCE_USER_DATA_TEXT)
- self.ds.get_data()
- self.assertEqual(
- GCE_USER_DATA_TEXT['instance/attributes']['user-data'].encode(),
- self.ds.get_userdata_raw())
-
- def test_metadata_encoding(self):
- """user-data is base64 encoded if user-data-encoding is 'base64'."""
- _set_mock_metadata(GCE_META_ENCODING)
- self.ds.get_data()
-
- instance_data = GCE_META_ENCODING.get('instance/attributes')
- decoded = b64decode(instance_data.get('user-data'))
- self.assertEqual(decoded, self.ds.get_userdata_raw())
-
- def test_missing_required_keys_return_false(self):
- for required_key in ['instance/id', 'instance/zone',
- 'instance/hostname']:
- meta = GCE_META_PARTIAL.copy()
- del meta[required_key]
- _set_mock_metadata(meta)
- self.assertEqual(False, self.ds.get_data())
- httpretty.reset()
-
- def test_no_ssh_keys_metadata(self):
- _set_mock_metadata()
- self.ds.get_data()
- self.assertEqual([], self.ds.get_public_ssh_keys())
-
- def test_cloudinit_ssh_keys(self):
- valid_key = 'ssh-rsa VALID {0}'
- invalid_key = 'ssh-rsa INVALID {0}'
- project_attributes = {
- 'sshKeys': '\n'.join([
- 'cloudinit:{0}'.format(valid_key.format(0)),
- 'user:{0}'.format(invalid_key.format(0)),
- ]),
- 'ssh-keys': '\n'.join([
- 'cloudinit:{0}'.format(valid_key.format(1)),
- 'user:{0}'.format(invalid_key.format(1)),
- ]),
- }
- instance_attributes = {
- 'ssh-keys': '\n'.join([
- 'cloudinit:{0}'.format(valid_key.format(2)),
- 'user:{0}'.format(invalid_key.format(2)),
- ]),
- 'block-project-ssh-keys': 'False',
- }
-
- meta = GCE_META.copy()
- meta['project/attributes'] = project_attributes
- meta['instance/attributes'] = instance_attributes
-
- _set_mock_metadata(meta)
- self.ds.get_data()
-
- expected = [valid_key.format(key) for key in range(3)]
- self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys()))
-
- @mock.patch("cloudinit.sources.DataSourceGCE.ug_util")
- def test_default_user_ssh_keys(self, mock_ug_util):
- mock_ug_util.normalize_users_groups.return_value = None, None
- mock_ug_util.extract_default.return_value = 'ubuntu', None
- ubuntu_ds = DataSourceGCE.DataSourceGCE(
- settings.CFG_BUILTIN, self._make_distro('ubuntu'),
- helpers.Paths({'run_dir': self.tmp_dir()}))
-
- valid_key = 'ssh-rsa VALID {0}'
- invalid_key = 'ssh-rsa INVALID {0}'
- project_attributes = {
- 'sshKeys': '\n'.join([
- 'ubuntu:{0}'.format(valid_key.format(0)),
- 'user:{0}'.format(invalid_key.format(0)),
- ]),
- 'ssh-keys': '\n'.join([
- 'ubuntu:{0}'.format(valid_key.format(1)),
- 'user:{0}'.format(invalid_key.format(1)),
- ]),
- }
- instance_attributes = {
- 'ssh-keys': '\n'.join([
- 'ubuntu:{0}'.format(valid_key.format(2)),
- 'user:{0}'.format(invalid_key.format(2)),
- ]),
- 'block-project-ssh-keys': 'False',
- }
-
- meta = GCE_META.copy()
- meta['project/attributes'] = project_attributes
- meta['instance/attributes'] = instance_attributes
-
- _set_mock_metadata(meta)
- ubuntu_ds.get_data()
-
- expected = [valid_key.format(key) for key in range(3)]
- self.assertEqual(set(expected), set(ubuntu_ds.get_public_ssh_keys()))
-
- def test_instance_ssh_keys_override(self):
- valid_key = 'ssh-rsa VALID {0}'
- invalid_key = 'ssh-rsa INVALID {0}'
- project_attributes = {
- 'sshKeys': 'cloudinit:{0}'.format(invalid_key.format(0)),
- 'ssh-keys': 'cloudinit:{0}'.format(invalid_key.format(1)),
- }
- instance_attributes = {
- 'sshKeys': 'cloudinit:{0}'.format(valid_key.format(0)),
- 'ssh-keys': 'cloudinit:{0}'.format(valid_key.format(1)),
- 'block-project-ssh-keys': 'False',
- }
-
- meta = GCE_META.copy()
- meta['project/attributes'] = project_attributes
- meta['instance/attributes'] = instance_attributes
-
- _set_mock_metadata(meta)
- self.ds.get_data()
-
- expected = [valid_key.format(key) for key in range(2)]
- self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys()))
-
- def test_block_project_ssh_keys_override(self):
- valid_key = 'ssh-rsa VALID {0}'
- invalid_key = 'ssh-rsa INVALID {0}'
- project_attributes = {
- 'sshKeys': 'cloudinit:{0}'.format(invalid_key.format(0)),
- 'ssh-keys': 'cloudinit:{0}'.format(invalid_key.format(1)),
- }
- instance_attributes = {
- 'ssh-keys': 'cloudinit:{0}'.format(valid_key.format(0)),
- 'block-project-ssh-keys': 'True',
- }
-
- meta = GCE_META.copy()
- meta['project/attributes'] = project_attributes
- meta['instance/attributes'] = instance_attributes
-
- _set_mock_metadata(meta)
- self.ds.get_data()
-
- expected = [valid_key.format(0)]
- self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys()))
-
- def test_only_last_part_of_zone_used_for_availability_zone(self):
- _set_mock_metadata()
- r = self.ds.get_data()
- self.assertEqual(True, r)
- self.assertEqual('bar', self.ds.availability_zone)
-
- @mock.patch("cloudinit.sources.DataSourceGCE.GoogleMetadataFetcher")
- def test_get_data_returns_false_if_not_on_gce(self, m_fetcher):
- self.m_platform_reports_gce.return_value = False
- ret = self.ds.get_data()
- self.assertEqual(False, ret)
- m_fetcher.assert_not_called()
-
- def test_has_expired(self):
-
- def _get_timestamp(days):
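- """Return a metadata-format timestamp offset by the given number of days."""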
- format_str = '%Y-%m-%dT%H:%M:%S+0000'
- today = datetime.datetime.now()
- timestamp = today + datetime.timedelta(days=days)
- return timestamp.strftime(format_str)
-
- past = _get_timestamp(-1)
- future = _get_timestamp(1)
- ssh_keys = {
- None: False,
- '': False,
- 'Invalid': False,
- 'user:ssh-rsa key user@domain.com': False,
- 'user:ssh-rsa key google {"expireOn":"%s"}' % past: False,
- 'user:ssh-rsa key google-ssh': False,
- 'user:ssh-rsa key google-ssh {invalid:json}': False,
- 'user:ssh-rsa key google-ssh {"userName":"user"}': False,
- 'user:ssh-rsa key google-ssh {"expireOn":"invalid"}': False,
- 'user:xyz key google-ssh {"expireOn":"%s"}' % future: False,
- 'user:xyz key google-ssh {"expireOn":"%s"}' % past: True,
- }
-
- for key, expired in ssh_keys.items():
- self.assertEqual(DataSourceGCE._has_expired(key), expired)
-
- def test_parse_public_keys_non_ascii(self):
- public_key_data = [
- 'cloudinit:rsa ssh-ke%s invalid' % chr(165),
- 'use%sname:rsa ssh-key' % chr(174),
- 'cloudinit:test 1',
- 'default:test 2',
- 'user:test 3',
- ]
- expected = ['test 1', 'test 2']
- found = DataSourceGCE._parse_public_keys(
- public_key_data, default_user='default')
- self.assertEqual(sorted(found), sorted(expected))
-
- @mock.patch("cloudinit.url_helper.readurl")
- def test_publish_host_keys(self, m_readurl):
- hostkeys = [('ssh-rsa', 'asdfasdf'),
- ('ssh-ed25519', 'qwerqwer')]
- readurl_expected_calls = [
- mock.call(check_status=False, data=b'asdfasdf', headers=HEADERS,
- request_method='PUT',
- url='%s%s' % (GUEST_ATTRIBUTES_URL, 'ssh-rsa')),
- mock.call(check_status=False, data=b'qwerqwer', headers=HEADERS,
- request_method='PUT',
- url='%s%s' % (GUEST_ATTRIBUTES_URL, 'ssh-ed25519')),
- ]
- self.ds.publish_host_keys(hostkeys)
- m_readurl.assert_has_calls(readurl_expected_calls, any_order=True)
-
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py
deleted file mode 100644
index 415755aa..00000000
--- a/tests/unittests/test_datasource/test_openstack.py
+++ /dev/null
@@ -1,694 +0,0 @@
-# Copyright (C) 2014 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import copy
-import httpretty as hp
-import json
-import re
-from io import StringIO
-from urllib.parse import urlparse
-
-from cloudinit.tests import helpers as test_helpers
-
-from cloudinit import helpers
-from cloudinit import settings
-from cloudinit.sources import BrokenMetadata, convert_vendordata, UNSET
-from cloudinit.sources import DataSourceOpenStack as ds
-from cloudinit.sources.helpers import openstack
-from cloudinit import util
-
-BASE_URL = "http://169.254.169.254"
-PUBKEY = u'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n'
-EC2_META = {
- 'ami-id': 'ami-00000001',
- 'ami-launch-index': '0',
- 'ami-manifest-path': 'FIXME',
- 'hostname': 'sm-foo-test.novalocal',
- 'instance-action': 'none',
- 'instance-id': 'i-00000001',
- 'instance-type': 'm1.tiny',
- 'local-hostname': 'sm-foo-test.novalocal',
- 'local-ipv4': '0.0.0.0',
- 'public-hostname': 'sm-foo-test.novalocal',
- 'public-ipv4': '0.0.0.1',
- 'reservation-id': 'r-iru5qm4m',
-}
-USER_DATA = b'#!/bin/sh\necho This is user data\n'
-VENDOR_DATA = {
- 'magic': '',
-}
-OSTACK_META = {
- 'availability_zone': 'nova',
- 'files': [{'content_path': '/content/0000', 'path': '/etc/foo.cfg'},
- {'content_path': '/content/0001', 'path': '/etc/bar/bar.cfg'}],
- 'hostname': 'sm-foo-test.novalocal',
- 'meta': {'dsmode': 'local', 'my-meta': 'my-value'},
- 'name': 'sm-foo-test',
- 'public_keys': {'mykey': PUBKEY},
- 'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c',
-}
-CONTENT_0 = b'This is contents of /etc/foo.cfg\n'
-CONTENT_1 = b'# this is /etc/bar/bar.cfg\n'
-OS_FILES = {
- 'openstack/content/0000': CONTENT_0,
- 'openstack/content/0001': CONTENT_1,
- 'openstack/latest/meta_data.json': json.dumps(OSTACK_META),
- 'openstack/latest/network_data.json': json.dumps(
- {'links': [], 'networks': [], 'services': []}),
- 'openstack/latest/user_data': USER_DATA,
- 'openstack/latest/vendor_data.json': json.dumps(VENDOR_DATA),
-}
-EC2_FILES = {
- 'latest/user-data': USER_DATA,
-}
-EC2_VERSIONS = [
- 'latest',
-]
-
-MOCK_PATH = 'cloudinit.sources.DataSourceOpenStack.'
-
-
-# TODO _register_uris should leverage test_ec2.register_mock_metaserver.
-def _register_uris(version, ec2_files, ec2_meta, os_files):
- """Registers a set of url patterns into httpretty that will mimic the
- same data returned by the openstack metadata service (and ec2 service)."""
-
- def match_ec2_url(uri, headers):
- path = uri.path.strip("/")
- if len(path) == 0:
- return (200, headers, "\n".join(EC2_VERSIONS))
- path = uri.path.lstrip("/")
- if path in ec2_files:
- return (200, headers, ec2_files.get(path))
- if path == 'latest/meta-data/':
- buf = StringIO()
- for (k, v) in ec2_meta.items():
- if isinstance(v, (list, tuple)):
- buf.write("%s/" % (k))
- else:
- buf.write("%s" % (k))
- buf.write("\n")
- return (200, headers, buf.getvalue())
- if path.startswith('latest/meta-data/'):
- value = None
- pieces = path.split("/")
- if path.endswith("/"):
- pieces = pieces[2:-1]
- value = util.get_cfg_by_path(ec2_meta, pieces)
- else:
- pieces = pieces[2:]
- value = util.get_cfg_by_path(ec2_meta, pieces)
- if value is not None:
- return (200, headers, str(value))
- return (404, headers, '')
-
- def match_os_uri(uri, headers):
- path = uri.path.strip("/")
- if path == 'openstack':
- return (200, headers, "\n".join([openstack.OS_LATEST]))
- path = uri.path.lstrip("/")
- if path in os_files:
- return (200, headers, os_files.get(path))
- return (404, headers, '')
-
- def get_request_callback(method, uri, headers):
- uri = urlparse(uri)
- path = uri.path.lstrip("/").split("/")
- if path[0] == 'openstack':
- return match_os_uri(uri, headers)
- return match_ec2_url(uri, headers)
-
- hp.register_uri(hp.GET, re.compile(r'http://169.254.169.254/.*'),
- body=get_request_callback)
-
-
-def _read_metadata_service():
- return ds.read_metadata_service(BASE_URL, retries=0, timeout=0.1)
-
-
-class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
-
- with_logs = True
- VERSION = 'latest'
-
- def setUp(self):
- super(TestOpenStackDataSource, self).setUp()
- self.tmp = self.tmp_dir()
-
- def test_successful(self):
- _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
- f = _read_metadata_service()
- self.assertEqual(VENDOR_DATA, f.get('vendordata'))
- self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg'])
- self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
- self.assertEqual(2, len(f['files']))
- self.assertEqual(USER_DATA, f.get('userdata'))
- self.assertEqual(EC2_META, f.get('ec2-metadata'))
- self.assertEqual(2, f.get('version'))
- metadata = f['metadata']
- self.assertEqual('nova', metadata.get('availability_zone'))
- self.assertEqual('sm-foo-test.novalocal', metadata.get('hostname'))
- self.assertEqual('sm-foo-test.novalocal',
- metadata.get('local-hostname'))
- self.assertEqual('sm-foo-test', metadata.get('name'))
- self.assertEqual('b0fa911b-69d4-4476-bbe2-1c92bff6535c',
- metadata.get('uuid'))
- self.assertEqual('b0fa911b-69d4-4476-bbe2-1c92bff6535c',
- metadata.get('instance-id'))
-
- def test_no_ec2(self):
- _register_uris(self.VERSION, {}, {}, OS_FILES)
- f = _read_metadata_service()
- self.assertEqual(VENDOR_DATA, f.get('vendordata'))
- self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg'])
- self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
- self.assertEqual(USER_DATA, f.get('userdata'))
- self.assertEqual({}, f.get('ec2-metadata'))
- self.assertEqual(2, f.get('version'))
-
- def test_bad_metadata(self):
- os_files = copy.deepcopy(OS_FILES)
- for k in list(os_files.keys()):
- if k.endswith('meta_data.json'):
- os_files.pop(k, None)
- _register_uris(self.VERSION, {}, {}, os_files)
- self.assertRaises(openstack.NonReadable, _read_metadata_service)
-
- def test_bad_uuid(self):
- os_files = copy.deepcopy(OS_FILES)
- os_meta = copy.deepcopy(OSTACK_META)
- os_meta.pop('uuid')
- for k in list(os_files.keys()):
- if k.endswith('meta_data.json'):
- os_files[k] = json.dumps(os_meta)
- _register_uris(self.VERSION, {}, {}, os_files)
- self.assertRaises(BrokenMetadata, _read_metadata_service)
-
- def test_userdata_empty(self):
- os_files = copy.deepcopy(OS_FILES)
- for k in list(os_files.keys()):
- if k.endswith('user_data'):
- os_files.pop(k, None)
- _register_uris(self.VERSION, {}, {}, os_files)
- f = _read_metadata_service()
- self.assertEqual(VENDOR_DATA, f.get('vendordata'))
- self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg'])
- self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
- self.assertFalse(f.get('userdata'))
-
- def test_vendordata_empty(self):
- os_files = copy.deepcopy(OS_FILES)
- for k in list(os_files.keys()):
- if k.endswith('vendor_data.json'):
- os_files.pop(k, None)
- _register_uris(self.VERSION, {}, {}, os_files)
- f = _read_metadata_service()
- self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg'])
- self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
- self.assertFalse(f.get('vendordata'))
-
- def test_vendordata_invalid(self):
- os_files = copy.deepcopy(OS_FILES)
- for k in list(os_files.keys()):
- if k.endswith('vendor_data.json'):
- os_files[k] = '{' # some invalid json
- _register_uris(self.VERSION, {}, {}, os_files)
- self.assertRaises(BrokenMetadata, _read_metadata_service)
-
- def test_metadata_invalid(self):
- os_files = copy.deepcopy(OS_FILES)
- for k in list(os_files.keys()):
- if k.endswith('meta_data.json'):
- os_files[k] = '{' # some invalid json
- _register_uris(self.VERSION, {}, {}, os_files)
- self.assertRaises(BrokenMetadata, _read_metadata_service)
-
- @test_helpers.mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
- def test_datasource(self, m_dhcp):
- _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
- ds_os = ds.DataSourceOpenStack(
- settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
- self.assertIsNone(ds_os.version)
- mock_path = MOCK_PATH + 'detect_openstack'
- with test_helpers.mock.patch(mock_path) as m_detect_os:
- m_detect_os.return_value = True
- found = ds_os.get_data()
- self.assertTrue(found)
- self.assertEqual(2, ds_os.version)
- md = dict(ds_os.metadata)
- md.pop('instance-id', None)
- md.pop('local-hostname', None)
- self.assertEqual(OSTACK_META, md)
- self.assertEqual(EC2_META, ds_os.ec2_metadata)
- self.assertEqual(USER_DATA, ds_os.userdata_raw)
- self.assertEqual(2, len(ds_os.files))
- self.assertEqual(VENDOR_DATA, ds_os.vendordata_pure)
- self.assertIsNone(ds_os.vendordata_raw)
- m_dhcp.assert_not_called()
-
- @hp.activate
- @test_helpers.mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
- @test_helpers.mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
- def test_local_datasource(self, m_dhcp, m_net):
- """OpenStackLocal calls EphemeralDHCPNetwork and gets instance data."""
- _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
- ds_os_local = ds.DataSourceOpenStackLocal(
- settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
- ds_os_local._fallback_interface = 'eth9' # Monkey patch for dhcp
- m_dhcp.return_value = [{
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'broadcast-address': '192.168.2.255'}]
-
- self.assertIsNone(ds_os_local.version)
- mock_path = MOCK_PATH + 'detect_openstack'
- with test_helpers.mock.patch(mock_path) as m_detect_os:
- m_detect_os.return_value = True
- found = ds_os_local.get_data()
- self.assertTrue(found)
- self.assertEqual(2, ds_os_local.version)
- md = dict(ds_os_local.metadata)
- md.pop('instance-id', None)
- md.pop('local-hostname', None)
- self.assertEqual(OSTACK_META, md)
- self.assertEqual(EC2_META, ds_os_local.ec2_metadata)
- self.assertEqual(USER_DATA, ds_os_local.userdata_raw)
- self.assertEqual(2, len(ds_os_local.files))
- self.assertEqual(VENDOR_DATA, ds_os_local.vendordata_pure)
- self.assertIsNone(ds_os_local.vendordata_raw)
- m_dhcp.assert_called_with('eth9', None)
-
- def test_bad_datasource_meta(self):
- os_files = copy.deepcopy(OS_FILES)
- for k in list(os_files.keys()):
- if k.endswith('meta_data.json'):
- os_files[k] = '{' # some invalid json
- _register_uris(self.VERSION, {}, {}, os_files)
- ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN,
- None,
- helpers.Paths({'run_dir': self.tmp}))
- self.assertIsNone(ds_os.version)
- mock_path = MOCK_PATH + 'detect_openstack'
- with test_helpers.mock.patch(mock_path) as m_detect_os:
- m_detect_os.return_value = True
- found = ds_os.get_data()
- self.assertFalse(found)
- self.assertIsNone(ds_os.version)
- self.assertIn(
- 'InvalidMetaDataException: Broken metadata address'
- ' http://169.254.169.25',
- self.logs.getvalue())
-
- def test_no_datasource(self):
- os_files = copy.deepcopy(OS_FILES)
- for k in list(os_files.keys()):
- if k.endswith('meta_data.json'):
- os_files.pop(k)
- _register_uris(self.VERSION, {}, {}, os_files)
- ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN,
- None,
- helpers.Paths({'run_dir': self.tmp}))
- ds_os.ds_cfg = {
- 'max_wait': 0,
- 'timeout': 0,
- }
- self.assertIsNone(ds_os.version)
- mock_path = MOCK_PATH + 'detect_openstack'
- with test_helpers.mock.patch(mock_path) as m_detect_os:
- m_detect_os.return_value = True
- found = ds_os.get_data()
- self.assertFalse(found)
- self.assertIsNone(ds_os.version)
-
- def test_network_config_disabled_by_datasource_config(self):
- """The network_config can be disabled from datasource config."""
- mock_path = MOCK_PATH + 'openstack.convert_net_json'
- ds_os = ds.DataSourceOpenStack(
- settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
- ds_os.ds_cfg = {'apply_network_config': False}
- sample_json = {'links': [{'ethernet_mac_address': 'mymac'}],
- 'networks': [], 'services': []}
- ds_os.network_json = sample_json # Ignore this content from metadata
- with test_helpers.mock.patch(mock_path) as m_convert_json:
- self.assertIsNone(ds_os.network_config)
- m_convert_json.assert_not_called()
-
- def test_network_config_from_network_json(self):
- """The datasource gets network_config from network_data.json."""
- mock_path = MOCK_PATH + 'openstack.convert_net_json'
- example_cfg = {'version': 1, 'config': []}
- ds_os = ds.DataSourceOpenStack(
- settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
- sample_json = {'links': [{'ethernet_mac_address': 'mymac'}],
- 'networks': [], 'services': []}
- ds_os.network_json = sample_json
- with test_helpers.mock.patch(mock_path) as m_convert_json:
- m_convert_json.return_value = example_cfg
- self.assertEqual(example_cfg, ds_os.network_config)
- self.assertIn(
- 'network config provided via network_json', self.logs.getvalue())
- m_convert_json.assert_called_with(sample_json, known_macs=None)
-
- def test_network_config_cached(self):
- """The datasource caches the network_config property."""
- mock_path = MOCK_PATH + 'openstack.convert_net_json'
- example_cfg = {'version': 1, 'config': []}
- ds_os = ds.DataSourceOpenStack(
- settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
- ds_os._network_config = example_cfg
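-        # Pre-seed the cached value; the property must return it without
-        # re-converting network_json.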
- with test_helpers.mock.patch(mock_path) as m_convert_json:
- self.assertEqual(example_cfg, ds_os.network_config)
- m_convert_json.assert_not_called()
-
- def test_disabled_datasource(self):
- os_files = copy.deepcopy(OS_FILES)
- os_meta = copy.deepcopy(OSTACK_META)
- os_meta['meta'] = {
- 'dsmode': 'disabled',
- }
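-        # A dsmode of 'disabled' in metadata should make get_data bail out.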
- for k in list(os_files.keys()):
- if k.endswith('meta_data.json'):
- os_files[k] = json.dumps(os_meta)
- _register_uris(self.VERSION, {}, {}, os_files)
- ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN,
- None,
- helpers.Paths({'run_dir': self.tmp}))
- ds_os.ds_cfg = {
- 'max_wait': 0,
- 'timeout': 0,
- }
- self.assertIsNone(ds_os.version)
- mock_path = MOCK_PATH + 'detect_openstack'
- with test_helpers.mock.patch(mock_path) as m_detect_os:
- m_detect_os.return_value = True
- found = ds_os.get_data()
- self.assertFalse(found)
- self.assertIsNone(ds_os.version)
-
- @hp.activate
- def test_wb__crawl_metadata_does_not_persist(self):
- """_crawl_metadata returns current metadata and does not cache."""
- _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
- ds_os = ds.DataSourceOpenStack(
- settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
- crawled_data = ds_os._crawl_metadata()
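-        # The datasource attributes stay at their defaults; only the
-        # returned dict carries the crawled values.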
- self.assertEqual(UNSET, ds_os.ec2_metadata)
- self.assertIsNone(ds_os.userdata_raw)
- self.assertEqual(0, len(ds_os.files))
- self.assertIsNone(ds_os.vendordata_raw)
- self.assertEqual(
- ['dsmode', 'ec2-metadata', 'files', 'metadata', 'networkdata',
- 'userdata', 'vendordata', 'version'],
- sorted(crawled_data.keys()))
- self.assertEqual('local', crawled_data['dsmode'])
- self.assertEqual(EC2_META, crawled_data['ec2-metadata'])
- self.assertEqual(2, len(crawled_data['files']))
- md = copy.deepcopy(crawled_data['metadata'])
- md.pop('instance-id')
- md.pop('local-hostname')
- self.assertEqual(OSTACK_META, md)
- self.assertEqual(
- json.loads(OS_FILES['openstack/latest/network_data.json']),
- crawled_data['networkdata'])
- self.assertEqual(USER_DATA, crawled_data['userdata'])
- self.assertEqual(VENDOR_DATA, crawled_data['vendordata'])
- self.assertEqual(2, crawled_data['version'])
-
-
-class TestVendorDataLoading(test_helpers.TestCase):
- def cvj(self, data):
- return convert_vendordata(data)
-
- def test_vd_load_none(self):
-        # non-existent vendor-data should return None
- self.assertIsNone(self.cvj(None))
-
- def test_vd_load_string(self):
- self.assertEqual(self.cvj("foobar"), "foobar")
-
- def test_vd_load_list(self):
- data = [{'foo': 'bar'}, 'mystring', list(['another', 'list'])]
- self.assertEqual(self.cvj(data), data)
-
- def test_vd_load_dict_no_ci(self):
- self.assertIsNone(self.cvj({'foo': 'bar'}))
-
- def test_vd_load_dict_ci_dict(self):
- self.assertRaises(ValueError, self.cvj,
- {'foo': 'bar', 'cloud-init': {'x': 1}})
-
- def test_vd_load_dict_ci_string(self):
- data = {'foo': 'bar', 'cloud-init': 'VENDOR_DATA'}
- self.assertEqual(self.cvj(data), data['cloud-init'])
-
- def test_vd_load_dict_ci_list(self):
- data = {'foo': 'bar', 'cloud-init': ['VD_1', 'VD_2']}
- self.assertEqual(self.cvj(data), data['cloud-init'])
-
-
-@test_helpers.mock.patch(MOCK_PATH + 'util.is_x86')
-class TestDetectOpenStack(test_helpers.CiTestCase):
-
- def test_detect_openstack_non_intel_x86(self, m_is_x86):
-        """Return True on non-Intel platforms because DMI isn't conclusive."""
- m_is_x86.return_value = False
- self.assertTrue(
- ds.detect_openstack(), 'Expected detect_openstack == True')
-
- @test_helpers.mock.patch(MOCK_PATH + 'util.get_proc_env')
- @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
- def test_not_detect_openstack_intel_x86_ec2(self, m_dmi, m_proc_env,
- m_is_x86):
- """Return False on EC2 platforms."""
- m_is_x86.return_value = True
- # No product_name in proc/1/environ
- m_proc_env.return_value = {'HOME': '/'}
-
- def fake_dmi_read(dmi_key):
- if dmi_key == 'system-product-name':
- return 'HVM domU' # Nothing 'openstackish' on EC2
- if dmi_key == 'chassis-asset-tag':
- return '' # Empty string on EC2
- assert False, 'Unexpected dmi read of %s' % dmi_key
-
- m_dmi.side_effect = fake_dmi_read
- self.assertFalse(
- ds.detect_openstack(), 'Expected detect_openstack == False on EC2')
- m_proc_env.assert_called_with(1)
-
- @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
- def test_detect_openstack_intel_product_name_compute(self, m_dmi,
- m_is_x86):
- """Return True on OpenStack compute and nova instances."""
- m_is_x86.return_value = True
- openstack_product_names = ['OpenStack Nova', 'OpenStack Compute']
-
- for product_name in openstack_product_names:
- m_dmi.return_value = product_name
- self.assertTrue(
- ds.detect_openstack(), 'Failed to detect_openstack')
-
- @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
- def test_detect_openstack_opentelekomcloud_chassis_asset_tag(self, m_dmi,
- m_is_x86):
- """Return True on OpenStack reporting OpenTelekomCloud asset-tag."""
- m_is_x86.return_value = True
-
- def fake_dmi_read(dmi_key):
- if dmi_key == 'system-product-name':
- return 'HVM domU' # Nothing 'openstackish' on OpenTelekomCloud
- if dmi_key == 'chassis-asset-tag':
- return 'OpenTelekomCloud'
- assert False, 'Unexpected dmi read of %s' % dmi_key
-
- m_dmi.side_effect = fake_dmi_read
- self.assertTrue(
- ds.detect_openstack(),
- 'Expected detect_openstack == True on OpenTelekomCloud')
-
- @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
- def test_detect_openstack_sapccloud_chassis_asset_tag(self, m_dmi,
- m_is_x86):
- """Return True on OpenStack reporting SAP CCloud VM asset-tag."""
- m_is_x86.return_value = True
-
- def fake_dmi_read(dmi_key):
- if dmi_key == 'system-product-name':
- return 'VMware Virtual Platform' # SAP CCloud uses VMware
- if dmi_key == 'chassis-asset-tag':
- return 'SAP CCloud VM'
- assert False, 'Unexpected dmi read of %s' % dmi_key
-
- m_dmi.side_effect = fake_dmi_read
- self.assertTrue(
- ds.detect_openstack(),
- 'Expected detect_openstack == True on SAP CCloud VM')
-
- @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
- def test_detect_openstack_oraclecloud_chassis_asset_tag(self, m_dmi,
- m_is_x86):
- """Return True on OpenStack reporting Oracle cloud asset-tag."""
- m_is_x86.return_value = True
-
- def fake_dmi_read(dmi_key):
- if dmi_key == 'system-product-name':
- return 'Standard PC (i440FX + PIIX, 1996)' # No match
- if dmi_key == 'chassis-asset-tag':
- return 'OracleCloud.com'
- assert False, 'Unexpected dmi read of %s' % dmi_key
-
- m_dmi.side_effect = fake_dmi_read
- self.assertTrue(
- ds.detect_openstack(accept_oracle=True),
- 'Expected detect_openstack == True on OracleCloud.com')
- self.assertFalse(
- ds.detect_openstack(accept_oracle=False),
- 'Expected detect_openstack == False.')
-
- def _test_detect_openstack_nova_compute_chassis_asset_tag(self, m_dmi,
- m_is_x86,
- chassis_tag):
- """Return True on OpenStack reporting generic asset-tag."""
- m_is_x86.return_value = True
-
- def fake_dmi_read(dmi_key):
- if dmi_key == 'system-product-name':
- return 'Generic OpenStack Platform'
- if dmi_key == 'chassis-asset-tag':
- return chassis_tag
- assert False, 'Unexpected dmi read of %s' % dmi_key
-
- m_dmi.side_effect = fake_dmi_read
- self.assertTrue(
- ds.detect_openstack(),
- 'Expected detect_openstack == True on Generic OpenStack Platform')
-
- @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
- def test_detect_openstack_nova_chassis_asset_tag(self, m_dmi,
- m_is_x86):
- self._test_detect_openstack_nova_compute_chassis_asset_tag(
- m_dmi, m_is_x86, 'OpenStack Nova')
-
- @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
- def test_detect_openstack_compute_chassis_asset_tag(self, m_dmi,
- m_is_x86):
- self._test_detect_openstack_nova_compute_chassis_asset_tag(
- m_dmi, m_is_x86, 'OpenStack Compute')
-
- @test_helpers.mock.patch(MOCK_PATH + 'util.get_proc_env')
- @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
- def test_detect_openstack_by_proc_1_environ(self, m_dmi, m_proc_env,
- m_is_x86):
- """Return True when nova product_name specified in /proc/1/environ."""
- m_is_x86.return_value = True
- # Nova product_name in proc/1/environ
- m_proc_env.return_value = {
- 'HOME': '/', 'product_name': 'OpenStack Nova'}
-
- def fake_dmi_read(dmi_key):
- if dmi_key == 'system-product-name':
- return 'HVM domU' # Nothing 'openstackish'
- if dmi_key == 'chassis-asset-tag':
-                return ''  # Nothing 'openstackish'
- assert False, 'Unexpected dmi read of %s' % dmi_key
-
- m_dmi.side_effect = fake_dmi_read
- self.assertTrue(
- ds.detect_openstack(),
-            'Expected detect_openstack == True from /proc/1/environ')
- m_proc_env.assert_called_with(1)
-
-
-class TestMetadataReader(test_helpers.HttprettyTestCase):
- """Test the MetadataReader."""
- burl = 'http://169.254.169.254/'
- md_base = {
- 'availability_zone': 'myaz1',
- 'hostname': 'sm-foo-test.novalocal',
- "keys": [{"data": PUBKEY, "name": "brickies", "type": "ssh"}],
- 'launch_index': 0,
- 'name': 'sm-foo-test',
- 'public_keys': {'mykey': PUBKEY},
- 'project_id': '6a103f813b774b9fb15a4fcd36e1c056',
- 'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c'}
-
- def register(self, path, body=None, status=200):
- content = body if not isinstance(body, str) else body.encode('utf-8')
- hp.register_uri(
- hp.GET, self.burl + "openstack" + path, status=status,
- body=content)
-
- def register_versions(self, versions):
- self.register("", '\n'.join(versions))
- self.register("/", '\n'.join(versions))
-
- def register_version(self, version, data):
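-        # Register the version's file listing and each file's content;
-        # serve a 404 for user_data when the test does not provide it.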
- content = '\n'.join(sorted(data.keys()))
- self.register(version, content)
- self.register(version + "/", content)
- for path, content in data.items():
- self.register("/%s/%s" % (version, path), content)
- self.register("/%s/%s" % (version, path), content)
- if 'user_data' not in data:
- self.register("/%s/user_data" % version, "nodata", status=404)
-
- def test__find_working_version(self):
-        """Test that a working version is found, ignoring unsupported."""
- unsup = "2016-11-09"
- self.register_versions(
- [openstack.OS_FOLSOM, openstack.OS_LIBERTY, unsup,
- openstack.OS_LATEST])
- self.assertEqual(
- openstack.OS_LIBERTY,
- openstack.MetadataReader(self.burl)._find_working_version())
-
- def test__find_working_version_uses_latest(self):
-        """'latest' should be used if no supported versions are found."""
- unsup1, unsup2 = ("2016-11-09", '2017-06-06')
- self.register_versions([unsup1, unsup2, openstack.OS_LATEST])
- self.assertEqual(
- openstack.OS_LATEST,
- openstack.MetadataReader(self.burl)._find_working_version())
-
- def test_read_v2_os_ocata(self):
- """Validate return value of read_v2 for os_ocata data."""
- md = copy.deepcopy(self.md_base)
- md['devices'] = []
- network_data = {'links': [], 'networks': [], 'services': []}
- vendor_data = {}
- vendor_data2 = {"static": {}}
-
- data = {
- 'meta_data.json': json.dumps(md),
- 'network_data.json': json.dumps(network_data),
- 'vendor_data.json': json.dumps(vendor_data),
- 'vendor_data2.json': json.dumps(vendor_data2),
- }
-
- self.register_versions([openstack.OS_OCATA, openstack.OS_LATEST])
- self.register_version(openstack.OS_OCATA, data)
-
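-        # Stub the EC2 metadata crawl; read_v2 should surface the stubbed
-        # value verbatim under the 'ec2-metadata' key.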
- mock_read_ec2 = test_helpers.mock.MagicMock(
- return_value={'instance-id': 'unused-ec2'})
- expected_md = copy.deepcopy(md)
- expected_md.update(
- {'instance-id': md['uuid'], 'local-hostname': md['hostname']})
- expected = {
- 'userdata': '', # Annoying, no user-data results in empty string.
- 'version': 2,
- 'metadata': expected_md,
- 'vendordata': vendor_data,
- 'networkdata': network_data,
- 'ec2-metadata': mock_read_ec2.return_value,
- 'files': {},
- }
- reader = openstack.MetadataReader(self.burl)
- reader._read_ec2_metadata = mock_read_ec2
- self.assertEqual(expected, reader.read_v2())
- self.assertEqual(1, mock_read_ec2.call_count)
-
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py
deleted file mode 100644
index 16773de5..00000000
--- a/tests/unittests/test_datasource/test_ovf.py
+++ /dev/null
@@ -1,544 +0,0 @@
-# Copyright (C) 2016 Canonical Ltd.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import base64
-import os
-
-from collections import OrderedDict
-from textwrap import dedent
-
-from cloudinit import subp
-from cloudinit import util
-from cloudinit.tests.helpers import CiTestCase, mock, wrap_and_call
-from cloudinit.helpers import Paths
-from cloudinit.sources import DataSourceOVF as dsovf
-from cloudinit.sources.helpers.vmware.imc.config_custom_script import (
- CustomScriptNotFound)
-
-MPATH = 'cloudinit.sources.DataSourceOVF.'
-
-NOT_FOUND = None
-
-OVF_ENV_CONTENT = """<?xml version="1.0" encoding="UTF-8"?>
-<Environment xmlns="http://schemas.dmtf.org/ovf/environment/1"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xmlns:oe="http://schemas.dmtf.org/ovf/environment/1"
- xsi:schemaLocation="http://schemas.dmtf.org/ovf/environment/1 ../dsp8027.xsd"
- oe:id="WebTier">
- <!-- Information about hypervisor platform -->
- <oe:PlatformSection>
- <Kind>ESX Server</Kind>
- <Version>3.0.1</Version>
- <Vendor>VMware, Inc.</Vendor>
- <Locale>en_US</Locale>
- </oe:PlatformSection>
- <!--- Properties defined for this virtual machine -->
- <PropertySection>
-{properties}
- </PropertySection>
-</Environment>
-"""
-
-
-def fill_properties(props, template=OVF_ENV_CONTENT):
- lines = []
- prop_tmpl = '<Property oe:key="{key}" oe:value="{val}"/>'
- for key, val in props.items():
- lines.append(prop_tmpl.format(key=key, val=val))
- indent = " "
- properties = ''.join([indent + line + "\n" for line in lines])
- return template.format(properties=properties)
-
-
-class TestReadOvfEnv(CiTestCase):
- def test_with_b64_userdata(self):
- user_data = "#!/bin/sh\necho hello world\n"
- user_data_b64 = base64.b64encode(user_data.encode()).decode()
- props = {"user-data": user_data_b64, "password": "passw0rd",
- "instance-id": "inst-001"}
- env = fill_properties(props)
- md, ud, cfg = dsovf.read_ovf_environment(env)
- self.assertEqual({"instance-id": "inst-001"}, md)
- self.assertEqual(user_data.encode(), ud)
- self.assertEqual({'password': "passw0rd"}, cfg)
-
- def test_with_non_b64_userdata(self):
- user_data = "my-user-data"
- props = {"user-data": user_data, "instance-id": "inst-001"}
- env = fill_properties(props)
- md, ud, cfg = dsovf.read_ovf_environment(env)
- self.assertEqual({"instance-id": "inst-001"}, md)
- self.assertEqual(user_data.encode(), ud)
- self.assertEqual({}, cfg)
-
- def test_with_no_userdata(self):
- props = {"password": "passw0rd", "instance-id": "inst-001"}
- env = fill_properties(props)
- md, ud, cfg = dsovf.read_ovf_environment(env)
- self.assertEqual({"instance-id": "inst-001"}, md)
- self.assertEqual({'password': "passw0rd"}, cfg)
- self.assertIsNone(ud)
-
-
-class TestMarkerFiles(CiTestCase):
-
- def setUp(self):
- super(TestMarkerFiles, self).setUp()
- self.tdir = self.tmp_dir()
-
- def test_false_when_markerid_none(self):
- """Return False when markerid provided is None."""
- self.assertFalse(
- dsovf.check_marker_exists(markerid=None, marker_dir=self.tdir))
-
- def test_markerid_file_exist(self):
- """Return False when markerid file path does not exist,
- True otherwise."""
- self.assertFalse(
- dsovf.check_marker_exists('123', self.tdir))
-
- marker_file = self.tmp_path('.markerfile-123.txt', self.tdir)
- util.write_file(marker_file, '')
- self.assertTrue(
- dsovf.check_marker_exists('123', self.tdir)
- )
-
- def test_marker_file_setup(self):
- """Test creation of marker files."""
- markerfilepath = self.tmp_path('.markerfile-hi.txt', self.tdir)
- self.assertFalse(os.path.exists(markerfilepath))
- dsovf.setup_marker_files(markerid='hi', marker_dir=self.tdir)
- self.assertTrue(os.path.exists(markerfilepath))
-
-
-class TestDatasourceOVF(CiTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestDatasourceOVF, self).setUp()
- self.datasource = dsovf.DataSourceOVF
- self.tdir = self.tmp_dir()
-
- def test_get_data_false_on_none_dmi_data(self):
- """When dmi for system-product-name is None, get_data returns False."""
- paths = Paths({'cloud_dir': self.tdir})
- ds = self.datasource(sys_cfg={}, distro={}, paths=paths)
- retcode = wrap_and_call(
- 'cloudinit.sources.DataSourceOVF',
- {'dmi.read_dmi_data': None,
- 'transport_iso9660': NOT_FOUND,
- 'transport_vmware_guestinfo': NOT_FOUND},
- ds.get_data)
- self.assertFalse(retcode, 'Expected False return from ds.get_data')
- self.assertIn(
- 'DEBUG: No system-product-name found', self.logs.getvalue())
-
- def test_get_data_no_vmware_customization_disabled(self):
- """When vmware customization is disabled via sys_cfg log a message."""
- paths = Paths({'cloud_dir': self.tdir})
- ds = self.datasource(
- sys_cfg={'disable_vmware_customization': True}, distro={},
- paths=paths)
- retcode = wrap_and_call(
- 'cloudinit.sources.DataSourceOVF',
- {'dmi.read_dmi_data': 'vmware',
- 'transport_iso9660': NOT_FOUND,
- 'transport_vmware_guestinfo': NOT_FOUND},
- ds.get_data)
- self.assertFalse(retcode, 'Expected False return from ds.get_data')
- self.assertIn(
- 'DEBUG: Customization for VMware platform is disabled.',
- self.logs.getvalue())
-
- def test_get_data_vmware_customization_disabled(self):
-        """When the cloud-init workflow for VMware is enabled via sys_cfg,
-        log a message.
- """
- paths = Paths({'cloud_dir': self.tdir})
- ds = self.datasource(
- sys_cfg={'disable_vmware_customization': False}, distro={},
- paths=paths)
- conf_file = self.tmp_path('test-cust', self.tdir)
- conf_content = dedent("""\
- [CUSTOM-SCRIPT]
- SCRIPT-NAME = test-script
- [MISC]
- MARKER-ID = 12345345
- """)
- util.write_file(conf_file, conf_content)
- with mock.patch(MPATH + 'get_tools_config', return_value='true'):
- with self.assertRaises(CustomScriptNotFound) as context:
- wrap_and_call(
- 'cloudinit.sources.DataSourceOVF',
- {'dmi.read_dmi_data': 'vmware',
- 'util.del_dir': True,
- 'search_file': self.tdir,
- 'wait_for_imc_cfg_file': conf_file,
- 'get_nics_to_enable': ''},
- ds.get_data)
- customscript = self.tmp_path('test-script', self.tdir)
- self.assertIn('Script %s not found!!' % customscript,
- str(context.exception))
-
- def test_get_data_cust_script_disabled(self):
- """If custom script is disabled by VMware tools configuration,
- raise a RuntimeError.
- """
- paths = Paths({'cloud_dir': self.tdir})
- ds = self.datasource(
- sys_cfg={'disable_vmware_customization': False}, distro={},
- paths=paths)
- # Prepare the conf file
- conf_file = self.tmp_path('test-cust', self.tdir)
- conf_content = dedent("""\
- [CUSTOM-SCRIPT]
- SCRIPT-NAME = test-script
- [MISC]
- MARKER-ID = 12345346
- """)
- util.write_file(conf_file, conf_content)
-        # Prepare the custom script
- customscript = self.tmp_path('test-script', self.tdir)
- util.write_file(customscript, "This is the post cust script")
-
- with mock.patch(MPATH + 'get_tools_config', return_value='invalid'):
- with mock.patch(MPATH + 'set_customization_status',
- return_value=('msg', b'')):
- with self.assertRaises(RuntimeError) as context:
- wrap_and_call(
- 'cloudinit.sources.DataSourceOVF',
- {'dmi.read_dmi_data': 'vmware',
- 'util.del_dir': True,
- 'search_file': self.tdir,
- 'wait_for_imc_cfg_file': conf_file,
- 'get_nics_to_enable': ''},
- ds.get_data)
- self.assertIn('Custom script is disabled by VM Administrator',
- str(context.exception))
-
- def test_get_data_cust_script_enabled(self):
- """If custom script is enabled by VMware tools configuration,
- execute the script.
- """
- paths = Paths({'cloud_dir': self.tdir})
- ds = self.datasource(
- sys_cfg={'disable_vmware_customization': False}, distro={},
- paths=paths)
- # Prepare the conf file
- conf_file = self.tmp_path('test-cust', self.tdir)
- conf_content = dedent("""\
- [CUSTOM-SCRIPT]
- SCRIPT-NAME = test-script
- [MISC]
- MARKER-ID = 12345346
- """)
- util.write_file(conf_file, conf_content)
-
-        # Mock that the custom script is enabled by returning 'true' when
-        # calling get_tools_config
- with mock.patch(MPATH + 'get_tools_config', return_value="true"):
- with mock.patch(MPATH + 'set_customization_status',
- return_value=('msg', b'')):
- with self.assertRaises(CustomScriptNotFound) as context:
- wrap_and_call(
- 'cloudinit.sources.DataSourceOVF',
- {'dmi.read_dmi_data': 'vmware',
- 'util.del_dir': True,
- 'search_file': self.tdir,
- 'wait_for_imc_cfg_file': conf_file,
- 'get_nics_to_enable': ''},
- ds.get_data)
- # Verify custom script is trying to be executed
- customscript = self.tmp_path('test-script', self.tdir)
- self.assertIn('Script %s not found!!' % customscript,
- str(context.exception))
-
- def test_get_data_force_run_post_script_is_yes(self):
-        """If DEFAULT-RUN-POST-CUST-SCRIPT is yes, the custom script can run
-        even when enable-custom-scripts is not defined in the VMware Tools
-        configuration.
-        """
- paths = Paths({'cloud_dir': self.tdir})
- ds = self.datasource(
- sys_cfg={'disable_vmware_customization': False}, distro={},
- paths=paths)
- # Prepare the conf file
- conf_file = self.tmp_path('test-cust', self.tdir)
-        # Set DEFAULT-RUN-POST-CUST-SCRIPT = yes so that the default value of
-        # enable-custom-scripts is TRUE
- conf_content = dedent("""\
- [CUSTOM-SCRIPT]
- SCRIPT-NAME = test-script
- [MISC]
- MARKER-ID = 12345346
- DEFAULT-RUN-POST-CUST-SCRIPT = yes
- """)
- util.write_file(conf_file, conf_content)
-
- # Mock get_tools_config(section, key, defaultVal) to return
- # defaultVal
- def my_get_tools_config(*args, **kwargs):
- return args[2]
-
- with mock.patch(MPATH + 'get_tools_config',
- side_effect=my_get_tools_config):
- with mock.patch(MPATH + 'set_customization_status',
- return_value=('msg', b'')):
- with self.assertRaises(CustomScriptNotFound) as context:
- wrap_and_call(
- 'cloudinit.sources.DataSourceOVF',
- {'dmi.read_dmi_data': 'vmware',
- 'util.del_dir': True,
- 'search_file': self.tdir,
- 'wait_for_imc_cfg_file': conf_file,
- 'get_nics_to_enable': ''},
- ds.get_data)
- # Verify custom script still runs although it is
- # disabled by VMware Tools
- customscript = self.tmp_path('test-script', self.tdir)
- self.assertIn('Script %s not found!!' % customscript,
- str(context.exception))
-
- def test_get_data_non_vmware_seed_platform_info(self):
- """Platform info properly reports when on non-vmware platforms."""
- paths = Paths({'cloud_dir': self.tdir, 'run_dir': self.tdir})
- # Write ovf-env.xml seed file
- seed_dir = self.tmp_path('seed', dir=self.tdir)
- ovf_env = self.tmp_path('ovf-env.xml', dir=seed_dir)
- util.write_file(ovf_env, OVF_ENV_CONTENT)
- ds = self.datasource(sys_cfg={}, distro={}, paths=paths)
-
- self.assertEqual('ovf', ds.cloud_name)
- self.assertEqual('ovf', ds.platform_type)
- with mock.patch(MPATH + 'dmi.read_dmi_data', return_value='!VMware'):
- with mock.patch(MPATH + 'transport_vmware_guestinfo') as m_guestd:
- with mock.patch(MPATH + 'transport_iso9660') as m_iso9660:
- m_iso9660.return_value = NOT_FOUND
- m_guestd.return_value = NOT_FOUND
- self.assertTrue(ds.get_data())
- self.assertEqual(
- 'ovf (%s/seed/ovf-env.xml)' % self.tdir,
- ds.subplatform)
-
- def test_get_data_vmware_seed_platform_info(self):
- """Platform info properly reports when on VMware platform."""
- paths = Paths({'cloud_dir': self.tdir, 'run_dir': self.tdir})
- # Write ovf-env.xml seed file
- seed_dir = self.tmp_path('seed', dir=self.tdir)
- ovf_env = self.tmp_path('ovf-env.xml', dir=seed_dir)
- util.write_file(ovf_env, OVF_ENV_CONTENT)
- ds = self.datasource(sys_cfg={}, distro={}, paths=paths)
-
- self.assertEqual('ovf', ds.cloud_name)
- self.assertEqual('ovf', ds.platform_type)
- with mock.patch(MPATH + 'dmi.read_dmi_data', return_value='VMWare'):
- with mock.patch(MPATH + 'transport_vmware_guestinfo') as m_guestd:
- with mock.patch(MPATH + 'transport_iso9660') as m_iso9660:
- m_iso9660.return_value = NOT_FOUND
- m_guestd.return_value = NOT_FOUND
- self.assertTrue(ds.get_data())
- self.assertEqual(
- 'vmware (%s/seed/ovf-env.xml)' % self.tdir,
- ds.subplatform)
-
-
-class TestTransportIso9660(CiTestCase):
-
- def setUp(self):
- super(TestTransportIso9660, self).setUp()
- self.add_patch('cloudinit.util.find_devs_with',
- 'm_find_devs_with')
- self.add_patch('cloudinit.util.mounts', 'm_mounts')
- self.add_patch('cloudinit.util.mount_cb', 'm_mount_cb')
- self.add_patch('cloudinit.sources.DataSourceOVF.get_ovf_env',
- 'm_get_ovf_env')
- self.m_get_ovf_env.return_value = ('myfile', 'mycontent')
-
- def test_find_already_mounted(self):
-        """Check we call get_ovf_env on matching mounted devices"""
- mounts = {
- '/dev/sr9': {
- 'fstype': 'iso9660',
- 'mountpoint': 'wark/media/sr9',
- 'opts': 'ro',
- }
- }
- self.m_mounts.return_value = mounts
-
- self.assertEqual("mycontent", dsovf.transport_iso9660())
-
- def test_find_already_mounted_skips_non_iso9660(self):
-        """Check we call get_ovf_env, ignoring non-iso9660 mounts"""
- mounts = {
- '/dev/xvdb': {
- 'fstype': 'vfat',
- 'mountpoint': 'wark/foobar',
- 'opts': 'defaults,noatime',
- },
- '/dev/xvdc': {
- 'fstype': 'iso9660',
- 'mountpoint': 'wark/media/sr9',
- 'opts': 'ro',
- }
- }
- # We use an OrderedDict here to ensure we check xvdb before xvdc
- # as we're not mocking the regex matching, however, if we place
- # an entry in the results then we can be reasonably sure that
- # we're skipping an entry which fails to match.
- self.m_mounts.return_value = (
- OrderedDict(sorted(mounts.items(), key=lambda t: t[0])))
-
- self.assertEqual("mycontent", dsovf.transport_iso9660())
-
- def test_find_already_mounted_matches_kname(self):
- """Check we dont regex match on basename of the device"""
- mounts = {
- '/dev/foo/bar/xvdc': {
- 'fstype': 'iso9660',
- 'mountpoint': 'wark/media/sr9',
- 'opts': 'ro',
- }
- }
-        # A nested device path should not match on its basename alone.
- self.m_mounts.return_value = mounts
-
- self.assertEqual(NOT_FOUND, dsovf.transport_iso9660())
-
- def test_mount_cb_called_on_blkdevs_with_iso9660(self):
- """Check we call mount_cb on blockdevs with iso9660 only"""
- self.m_mounts.return_value = {}
- self.m_find_devs_with.return_value = ['/dev/sr0']
- self.m_mount_cb.return_value = ("myfile", "mycontent")
-
- self.assertEqual("mycontent", dsovf.transport_iso9660())
- self.m_mount_cb.assert_called_with(
- "/dev/sr0", dsovf.get_ovf_env, mtype="iso9660")
-
- def test_mount_cb_called_on_blkdevs_with_iso9660_check_regex(self):
- """Check we call mount_cb on blockdevs with iso9660 and match regex"""
- self.m_mounts.return_value = {}
- self.m_find_devs_with.return_value = [
- '/dev/abc', '/dev/my-cdrom', '/dev/sr0']
- self.m_mount_cb.return_value = ("myfile", "mycontent")
-
- self.assertEqual("mycontent", dsovf.transport_iso9660())
- self.m_mount_cb.assert_called_with(
- "/dev/sr0", dsovf.get_ovf_env, mtype="iso9660")
-
- def test_mount_cb_not_called_no_matches(self):
- """Check we don't call mount_cb if nothing matches"""
- self.m_mounts.return_value = {}
- self.m_find_devs_with.return_value = ['/dev/vg/myovf']
-
- self.assertEqual(NOT_FOUND, dsovf.transport_iso9660())
- self.assertEqual(0, self.m_mount_cb.call_count)
-
- def test_mount_cb_called_require_iso_false(self):
- """Check we call mount_cb on blockdevs with require_iso=False"""
- self.m_mounts.return_value = {}
- self.m_find_devs_with.return_value = ['/dev/xvdz']
- self.m_mount_cb.return_value = ("myfile", "mycontent")
-
- self.assertEqual(
- "mycontent", dsovf.transport_iso9660(require_iso=False))
-
- self.m_mount_cb.assert_called_with(
- "/dev/xvdz", dsovf.get_ovf_env, mtype=None)
-
- def test_maybe_cdrom_device_none(self):
- """Test maybe_cdrom_device returns False for none/empty input"""
- self.assertFalse(dsovf.maybe_cdrom_device(None))
- self.assertFalse(dsovf.maybe_cdrom_device(''))
-
- def test_maybe_cdrom_device_non_string_exception(self):
- """Test maybe_cdrom_device raises ValueError on non-string types"""
- with self.assertRaises(ValueError):
- dsovf.maybe_cdrom_device({'a': 'eleven'})
-
- def test_maybe_cdrom_device_false_on_multi_dir_paths(self):
- """Test maybe_cdrom_device is false on /dev[/.*]/* paths"""
- self.assertFalse(dsovf.maybe_cdrom_device('/dev/foo/sr0'))
- self.assertFalse(dsovf.maybe_cdrom_device('foo/sr0'))
- self.assertFalse(dsovf.maybe_cdrom_device('../foo/sr0'))
- self.assertFalse(dsovf.maybe_cdrom_device('../foo/sr0'))
-
- def test_maybe_cdrom_device_true_on_hd_partitions(self):
-        """Test maybe_cdrom_device is true on /dev/hd[a-z][0-9]+ paths"""
- self.assertTrue(dsovf.maybe_cdrom_device('/dev/hda1'))
- self.assertTrue(dsovf.maybe_cdrom_device('hdz9'))
-
- def test_maybe_cdrom_device_true_on_valid_relative_paths(self):
- """Test maybe_cdrom_device normalizes paths"""
- self.assertTrue(dsovf.maybe_cdrom_device('/dev/wark/../sr9'))
- self.assertTrue(dsovf.maybe_cdrom_device('///sr0'))
- self.assertTrue(dsovf.maybe_cdrom_device('/sr0'))
- self.assertTrue(dsovf.maybe_cdrom_device('//dev//hda'))
-
- def test_maybe_cdrom_device_true_on_xvd_partitions(self):
- """Test maybe_cdrom_device returns true on xvd*"""
- self.assertTrue(dsovf.maybe_cdrom_device('/dev/xvda'))
- self.assertTrue(dsovf.maybe_cdrom_device('/dev/xvda1'))
- self.assertTrue(dsovf.maybe_cdrom_device('xvdza1'))
-
-
-@mock.patch(MPATH + "subp.which")
-@mock.patch(MPATH + "subp.subp")
-class TestTransportVmwareGuestinfo(CiTestCase):
- """Test the com.vmware.guestInfo transport implemented in
- transport_vmware_guestinfo."""
-
- rpctool = 'vmware-rpctool'
- with_logs = True
- rpctool_path = '/not/important/vmware-rpctool'
-
- def test_without_vmware_rpctool_returns_notfound(self, m_subp, m_which):
- m_which.return_value = None
- self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo())
- self.assertEqual(0, m_subp.call_count,
- "subp should not be called if no rpctool in path.")
-
- def test_notfound_on_exit_code_1(self, m_subp, m_which):
- """If vmware-rpctool exits 1, then must return not found."""
- m_which.return_value = self.rpctool_path
- m_subp.side_effect = subp.ProcessExecutionError(
- stdout="", stderr="No value found", exit_code=1, cmd=["unused"])
- self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo())
- self.assertEqual(1, m_subp.call_count)
- self.assertNotIn("WARNING", self.logs.getvalue(),
- "exit code of 1 by rpctool should not cause warning.")
-
- def test_notfound_if_no_content_but_exit_zero(self, m_subp, m_which):
-        """If vmware-rpctool exits 0 with no stdout, treat it as not-found.
-
-        This isn't actually a case I've seen. Normally on "not found",
-        rpctool would exit 1 with 'No value found' on stderr. But cover
-        the case where it exits 0 and just writes nothing to stdout.
- """
- m_which.return_value = self.rpctool_path
- m_subp.return_value = ('', '')
- self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo())
- self.assertEqual(1, m_subp.call_count)
-
- def test_notfound_and_warns_on_unexpected_exit_code(self, m_subp, m_which):
-        """If vmware-rpctool exits with anything other than 0 or 1, warn."""
- m_which.return_value = self.rpctool_path
- m_subp.side_effect = subp.ProcessExecutionError(
- stdout=None, stderr="No value found", exit_code=2, cmd=["unused"])
- self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo())
- self.assertEqual(1, m_subp.call_count)
- self.assertIn("WARNING", self.logs.getvalue(),
- "exit code of 2 by rpctool should log WARNING.")
-
- def test_found_when_guestinfo_present(self, m_subp, m_which):
- """When there is a ovf info, transport should return it."""
- m_which.return_value = self.rpctool_path
- content = fill_properties({})
- m_subp.return_value = (content, '')
- self.assertEqual(content, dsovf.transport_vmware_guestinfo())
- self.assertEqual(1, m_subp.call_count)
-
-#
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_rbx.py b/tests/unittests/test_datasource/test_rbx.py
deleted file mode 100644
index d017510e..00000000
--- a/tests/unittests/test_datasource/test_rbx.py
+++ /dev/null
@@ -1,238 +0,0 @@
-import json
-
-from cloudinit import helpers
-from cloudinit import distros
-from cloudinit.sources import DataSourceRbxCloud as ds
-from cloudinit.tests.helpers import mock, CiTestCase, populate_dir
-from cloudinit import subp
-
-DS_PATH = "cloudinit.sources.DataSourceRbxCloud"
-
-CRYPTO_PASS = "$6$uktth46t$FvpDzFD2iL9YNZIG1Epz7957hJqbH0f" \
- "QKhnzcfBcUhEodGAWRqTy7tYG4nEW7SUOYBjxOSFIQW5" \
- "tToyGP41.s1"
-
-CLOUD_METADATA = {
- "vm": {
- "memory": 4,
- "cpu": 2,
- "name": "vm-image-builder",
- "_id": "5beab44f680cffd11f0e60fc"
- },
- "additionalMetadata": {
- "username": "guru",
- "sshKeys": ["ssh-rsa ..."],
- "password": {
- "sha512": CRYPTO_PASS
- }
- },
- "disk": [
- {"size": 10, "type": "ssd",
- "name": "vm-image-builder-os",
- "_id": "5beab450680cffd11f0e60fe"},
- {"size": 2, "type": "ssd",
- "name": "ubuntu-1804-bionic",
- "_id": "5bef002c680cffd11f107590"}
- ],
- "netadp": [
- {
- "ip": [{"address": "62.181.8.174"}],
- "network": {
- "dns": {"nameservers": ["8.8.8.8", "8.8.4.4"]},
- "routing": [],
- "gateway": "62.181.8.1",
- "netmask": "255.255.248.0",
- "name": "public",
- "type": "public",
- "_id": "5784e97be2627505227b578c"
- },
- "speed": 1000,
- "type": "hv",
- "macaddress": "00:15:5D:FF:0F:03",
- "_id": "5beab450680cffd11f0e6102"
- },
- {
- "ip": [{"address": "10.209.78.11"}],
- "network": {
- "dns": {"nameservers": ["9.9.9.9", "8.8.8.8"]},
- "routing": [],
- "gateway": "10.209.78.1",
- "netmask": "255.255.255.0",
- "name": "network-determined-bardeen",
- "type": "private",
- "_id": "5beaec64680cffd11f0e7c31"
- },
- "speed": 1000,
- "type": "hv",
- "macaddress": "00:15:5D:FF:0F:24",
- "_id": "5bec18c6680cffd11f0f0d8b"
- }
- ],
- "dvddrive": [{"iso": {}}]
-}
-
-
-class TestRbxDataSource(CiTestCase):
- parsed_user = None
- allowed_subp = ['bash']
-
- def _fetch_distro(self, kind):
- cls = distros.fetch(kind)
- paths = helpers.Paths({})
- return cls(kind, {}, paths)
-
- def setUp(self):
- super(TestRbxDataSource, self).setUp()
- self.tmp = self.tmp_dir()
- self.paths = helpers.Paths(
- {'cloud_dir': self.tmp, 'run_dir': self.tmp}
- )
-
- # defaults for few tests
- self.ds = ds.DataSourceRbxCloud
- self.seed_dir = self.paths.seed_dir
- self.sys_cfg = {'datasource': {'RbxCloud': {'dsmode': 'local'}}}
-
- def test_seed_read_user_data_callback_empty_file(self):
- populate_user_metadata(self.seed_dir, '')
- populate_cloud_metadata(self.seed_dir, {})
- results = ds.read_user_data_callback(self.seed_dir)
-
- self.assertIsNone(results)
-
- def test_seed_read_user_data_callback_valid_disk(self):
- populate_user_metadata(self.seed_dir, '')
- populate_cloud_metadata(self.seed_dir, CLOUD_METADATA)
- results = ds.read_user_data_callback(self.seed_dir)
-
- self.assertNotEqual(results, None)
- self.assertTrue('userdata' in results)
- self.assertTrue('metadata' in results)
- self.assertTrue('cfg' in results)
-
- def test_seed_read_user_data_callback_userdata(self):
- userdata = "#!/bin/sh\nexit 1"
- populate_user_metadata(self.seed_dir, userdata)
- populate_cloud_metadata(self.seed_dir, CLOUD_METADATA)
-
- results = ds.read_user_data_callback(self.seed_dir)
-
- self.assertNotEqual(results, None)
- self.assertTrue('userdata' in results)
- self.assertEqual(results['userdata'], userdata)
-
- def test_generate_network_config(self):
- expected = {
- 'version': 1,
- 'config': [
- {
- 'subnets': [
- {'control': 'auto',
- 'dns_nameservers': ['8.8.8.8', '8.8.4.4'],
- 'netmask': '255.255.248.0',
- 'address': '62.181.8.174',
- 'type': 'static', 'gateway': '62.181.8.1'}
- ],
- 'type': 'physical',
- 'name': 'eth0',
- 'mac_address': '00:15:5d:ff:0f:03'
- },
- {
- 'subnets': [
- {'control': 'auto',
- 'dns_nameservers': ['9.9.9.9', '8.8.8.8'],
- 'netmask': '255.255.255.0',
- 'address': '10.209.78.11',
- 'type': 'static',
- 'gateway': '10.209.78.1'}
- ],
- 'type': 'physical',
- 'name': 'eth1',
- 'mac_address': '00:15:5d:ff:0f:24'
- }
- ]
- }
-        self.assertEqual(
-            expected,
-            ds.generate_network_config(CLOUD_METADATA['netadp'])
-        )
-
- @mock.patch(DS_PATH + '.subp.subp')
- def test_gratuitous_arp_run_standard_arping(self, m_subp):
- """Test handle run arping & parameters."""
- items = [
- {
- 'destination': '172.17.0.2',
- 'source': '172.16.6.104'
- },
- {
- 'destination': '172.17.0.2',
- 'source': '172.16.6.104',
- },
- ]
- ds.gratuitous_arp(items, self._fetch_distro('ubuntu'))
- self.assertEqual([
- mock.call([
- 'arping', '-c', '2', '-S',
- '172.16.6.104', '172.17.0.2'
- ]),
- mock.call([
- 'arping', '-c', '2', '-S',
- '172.16.6.104', '172.17.0.2'
- ])
- ], m_subp.call_args_list
- )
-
- @mock.patch(DS_PATH + '.subp.subp')
- def test_handle_rhel_like_arping(self, m_subp):
- """Test handle on RHEL-like distros."""
- items = [
- {
- 'source': '172.16.6.104',
- 'destination': '172.17.0.2',
- }
- ]
- ds.gratuitous_arp(items, self._fetch_distro('fedora'))
- self.assertEqual([
- mock.call(
- ['arping', '-c', '2', '-s', '172.16.6.104', '172.17.0.2']
- )],
- m_subp.call_args_list
- )
-
- @mock.patch(
- DS_PATH + '.subp.subp',
- side_effect=subp.ProcessExecutionError()
- )
- def test_continue_on_arping_error(self, m_subp):
- """Continue when command error"""
- items = [
- {
- 'destination': '172.17.0.2',
- 'source': '172.16.6.104'
- },
- {
- 'destination': '172.17.0.2',
- 'source': '172.16.6.104',
- },
- ]
- ds.gratuitous_arp(items, self._fetch_distro('ubuntu'))
- self.assertEqual([
- mock.call([
- 'arping', '-c', '2', '-S',
- '172.16.6.104', '172.17.0.2'
- ]),
- mock.call([
- 'arping', '-c', '2', '-S',
- '172.16.6.104', '172.17.0.2'
- ])
- ], m_subp.call_args_list
- )
-
-
-def populate_cloud_metadata(path, data):
- populate_dir(path, {'cloud.json': json.dumps(data)})
-
-
-def populate_user_metadata(path, data):
- populate_dir(path, {'user.data': data})
diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py
deleted file mode 100644
index 32f3274a..00000000
--- a/tests/unittests/test_datasource/test_scaleway.py
+++ /dev/null
@@ -1,473 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import json
-
-import httpretty
-import requests
-
-from cloudinit import helpers
-from cloudinit import settings
-from cloudinit import sources
-from cloudinit.sources import DataSourceScaleway
-
-from cloudinit.tests.helpers import mock, HttprettyTestCase, CiTestCase
-
-
-class DataResponses(object):
- """
- Possible responses of the API endpoint
- 169.254.42.42/user_data/cloud-init and
- 169.254.42.42/vendor_data/cloud-init.
- """
-
- FAKE_USER_DATA = '#!/bin/bash\necho "user-data"'
-
- @staticmethod
- def rate_limited(method, uri, headers):
- return 429, headers, ''
-
- @staticmethod
- def api_error(method, uri, headers):
- return 500, headers, ''
-
- @classmethod
- def get_ok(cls, method, uri, headers):
- return 200, headers, cls.FAKE_USER_DATA
-
- @staticmethod
- def empty(method, uri, headers):
- """
- No user data for this server.
- """
- return 404, headers, ''
-
-
-class MetadataResponses(object):
- """
- Possible responses of the metadata API.
- """
-
- FAKE_METADATA = {
- 'id': '00000000-0000-0000-0000-000000000000',
- 'hostname': 'scaleway.host',
- 'tags': [
- "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD",
- ],
- 'ssh_public_keys': [{
- 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA',
- 'fingerprint': '2048 06:ae:... login (RSA)'
- }, {
- 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
- 'fingerprint': '2048 06:ff:... login2 (RSA)'
- }]
- }
-
- @classmethod
- def get_ok(cls, method, uri, headers):
- return 200, headers, json.dumps(cls.FAKE_METADATA)
-
-
-class TestOnScaleway(CiTestCase):
-
- def setUp(self):
- super(TestOnScaleway, self).setUp()
- self.tmp = self.tmp_dir()
-
- def install_mocks(self, fake_dmi, fake_file_exists, fake_cmdline):
- mock, faked = fake_dmi
- mock.return_value = 'Scaleway' if faked else 'Whatever'
-
- mock, faked = fake_file_exists
- mock.return_value = faked
-
- mock, faked = fake_cmdline
- mock.return_value = \
- 'initrd=initrd showopts scaleway nousb' if faked \
- else 'BOOT_IMAGE=/vmlinuz-3.11.0-26-generic'
-
- @mock.patch('cloudinit.util.get_cmdline')
- @mock.patch('os.path.exists')
- @mock.patch('cloudinit.dmi.read_dmi_data')
- def test_not_on_scaleway(self, m_read_dmi_data, m_file_exists,
- m_get_cmdline):
- self.install_mocks(
- fake_dmi=(m_read_dmi_data, False),
- fake_file_exists=(m_file_exists, False),
- fake_cmdline=(m_get_cmdline, False)
- )
- self.assertFalse(DataSourceScaleway.on_scaleway())
-
- # When not on Scaleway, get_data() returns False.
- datasource = DataSourceScaleway.DataSourceScaleway(
- settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})
- )
- self.assertFalse(datasource.get_data())
-
- @mock.patch('cloudinit.util.get_cmdline')
- @mock.patch('os.path.exists')
- @mock.patch('cloudinit.dmi.read_dmi_data')
- def test_on_scaleway_dmi(self, m_read_dmi_data, m_file_exists,
- m_get_cmdline):
- """
- dmidecode returns "Scaleway".
- """
- # dmidecode returns "Scaleway"
- self.install_mocks(
- fake_dmi=(m_read_dmi_data, True),
- fake_file_exists=(m_file_exists, False),
- fake_cmdline=(m_get_cmdline, False)
- )
- self.assertTrue(DataSourceScaleway.on_scaleway())
-
- @mock.patch('cloudinit.util.get_cmdline')
- @mock.patch('os.path.exists')
- @mock.patch('cloudinit.dmi.read_dmi_data')
- def test_on_scaleway_var_run_scaleway(self, m_read_dmi_data, m_file_exists,
- m_get_cmdline):
- """
- /var/run/scaleway exists.
- """
- self.install_mocks(
- fake_dmi=(m_read_dmi_data, False),
- fake_file_exists=(m_file_exists, True),
- fake_cmdline=(m_get_cmdline, False)
- )
- self.assertTrue(DataSourceScaleway.on_scaleway())
-
- @mock.patch('cloudinit.util.get_cmdline')
- @mock.patch('os.path.exists')
- @mock.patch('cloudinit.dmi.read_dmi_data')
- def test_on_scaleway_cmdline(self, m_read_dmi_data, m_file_exists,
- m_get_cmdline):
- """
- "scaleway" in /proc/cmdline.
- """
- self.install_mocks(
- fake_dmi=(m_read_dmi_data, False),
- fake_file_exists=(m_file_exists, False),
- fake_cmdline=(m_get_cmdline, True)
- )
- self.assertTrue(DataSourceScaleway.on_scaleway())
-
-
-def get_source_address_adapter(*args, **kwargs):
- """
-    The Scaleway user/vendor data API must be called from a privileged port.
-
-    If the unittests are run as non-root, the user doesn't have permission
-    to bind to ports below 1024.
-
-    This function removes the bind to a privileged address, since the HTTP
-    call is mocked by httpretty anyway.
- """
- kwargs.pop('source_address')
- return requests.adapters.HTTPAdapter(*args, **kwargs)
-
-
-class TestDataSourceScaleway(HttprettyTestCase):
-
- def setUp(self):
- tmp = self.tmp_dir()
- self.datasource = DataSourceScaleway.DataSourceScaleway(
- settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': tmp})
- )
- super(TestDataSourceScaleway, self).setUp()
-
- self.metadata_url = \
- DataSourceScaleway.BUILTIN_DS_CONFIG['metadata_url']
- self.userdata_url = \
- DataSourceScaleway.BUILTIN_DS_CONFIG['userdata_url']
- self.vendordata_url = \
- DataSourceScaleway.BUILTIN_DS_CONFIG['vendordata_url']
-
- self.add_patch('cloudinit.sources.DataSourceScaleway.on_scaleway',
- '_m_on_scaleway', return_value=True)
- self.add_patch(
- 'cloudinit.sources.DataSourceScaleway.net.find_fallback_nic',
- '_m_find_fallback_nic', return_value='scalewaynic0')
-
- @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4')
- @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter',
- get_source_address_adapter)
- @mock.patch('cloudinit.util.get_cmdline')
- @mock.patch('time.sleep', return_value=None)
- def test_metadata_ok(self, sleep, m_get_cmdline, dhcpv4):
- """
- get_data() returns metadata, user data and vendor data.
- """
- m_get_cmdline.return_value = 'scaleway'
-
- # Make user data API return a valid response
- httpretty.register_uri(httpretty.GET, self.metadata_url,
- body=MetadataResponses.get_ok)
- httpretty.register_uri(httpretty.GET, self.userdata_url,
- body=DataResponses.get_ok)
- httpretty.register_uri(httpretty.GET, self.vendordata_url,
- body=DataResponses.get_ok)
- self.datasource.get_data()
-
- self.assertEqual(self.datasource.get_instance_id(),
- MetadataResponses.FAKE_METADATA['id'])
- self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [
- u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
- u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD',
- u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA',
- ].sort())
- self.assertEqual(self.datasource.get_hostname(),
- MetadataResponses.FAKE_METADATA['hostname'])
- self.assertEqual(self.datasource.get_userdata_raw(),
- DataResponses.FAKE_USER_DATA)
- self.assertEqual(self.datasource.get_vendordata_raw(),
- DataResponses.FAKE_USER_DATA)
- self.assertIsNone(self.datasource.availability_zone)
- self.assertIsNone(self.datasource.region)
- self.assertEqual(sleep.call_count, 0)
-
- def test_ssh_keys_empty(self):
- """
-        get_public_ssh_keys() should return an empty list if no ssh keys are
-        available
- """
- self.datasource.metadata['tags'] = []
- self.datasource.metadata['ssh_public_keys'] = []
- self.assertEqual(self.datasource.get_public_ssh_keys(), [])
-
- def test_ssh_keys_only_tags(self):
- """
- get_public_ssh_keys() should return list of keys available in tags
- """
- self.datasource.metadata['tags'] = [
- "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD",
- "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABCCCCC",
- ]
- self.datasource.metadata['ssh_public_keys'] = []
- self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [
- u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD',
- u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
- ].sort())
-
- def test_ssh_keys_only_conf(self):
- """
- get_public_ssh_keys() should return list of keys available in
- ssh_public_keys field
- """
- self.datasource.metadata['tags'] = []
- self.datasource.metadata['ssh_public_keys'] = [{
- 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA',
- 'fingerprint': '2048 06:ae:... login (RSA)'
- }, {
- 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
- 'fingerprint': '2048 06:ff:... login2 (RSA)'
- }]
- self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [
- u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
- u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD',
- u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA',
- ].sort())
-
- def test_ssh_keys_both(self):
- """
- get_public_ssh_keys() should return a merge of keys available
- in ssh_public_keys and tags
- """
- self.datasource.metadata['tags'] = [
- "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD",
- ]
-
- self.datasource.metadata['ssh_public_keys'] = [{
- 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA',
- 'fingerprint': '2048 06:ae:... login (RSA)'
- }, {
- 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
- 'fingerprint': '2048 06:ff:... login2 (RSA)'
- }]
- self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [
- u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
- u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD',
- u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA',
- ].sort())
-
- @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4')
- @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter',
- get_source_address_adapter)
- @mock.patch('cloudinit.util.get_cmdline')
- @mock.patch('time.sleep', return_value=None)
- def test_metadata_404(self, sleep, m_get_cmdline, dhcpv4):
- """
- get_data() returns metadata, but no user data nor vendor data.
- """
- m_get_cmdline.return_value = 'scaleway'
-
- # Make user and vendor data APIs return HTTP/404, which means there is
- # no user / vendor data for the server.
- httpretty.register_uri(httpretty.GET, self.metadata_url,
- body=MetadataResponses.get_ok)
- httpretty.register_uri(httpretty.GET, self.userdata_url,
- body=DataResponses.empty)
- httpretty.register_uri(httpretty.GET, self.vendordata_url,
- body=DataResponses.empty)
- self.datasource.get_data()
- self.assertIsNone(self.datasource.get_userdata_raw())
- self.assertIsNone(self.datasource.get_vendordata_raw())
- self.assertEqual(sleep.call_count, 0)
-
- @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4')
- @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter',
- get_source_address_adapter)
- @mock.patch('cloudinit.util.get_cmdline')
- @mock.patch('time.sleep', return_value=None)
- def test_metadata_rate_limit(self, sleep, m_get_cmdline, dhcpv4):
- """
-        get_data() is rate-limited twice by the metadata API when fetching
-        user data.
- """
- m_get_cmdline.return_value = 'scaleway'
-
- httpretty.register_uri(httpretty.GET, self.metadata_url,
- body=MetadataResponses.get_ok)
- httpretty.register_uri(httpretty.GET, self.vendordata_url,
- body=DataResponses.empty)
-
- httpretty.register_uri(
- httpretty.GET, self.userdata_url,
- responses=[
- httpretty.Response(body=DataResponses.rate_limited),
- httpretty.Response(body=DataResponses.rate_limited),
- httpretty.Response(body=DataResponses.get_ok),
- ]
- )
- self.datasource.get_data()
- self.assertEqual(self.datasource.get_userdata_raw(),
- DataResponses.FAKE_USER_DATA)
- self.assertEqual(sleep.call_count, 2)
-
- @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
- @mock.patch('cloudinit.util.get_cmdline')
- def test_network_config_ok(self, m_get_cmdline, fallback_nic):
- """
- network_config will only generate IPv4 config if no ipv6 data is
- available in the metadata
- """
- m_get_cmdline.return_value = 'scaleway'
- fallback_nic.return_value = 'ens2'
- self.datasource.metadata['ipv6'] = None
-
- netcfg = self.datasource.network_config
- resp = {
- 'version': 1,
- 'config': [
- {
- 'type': 'physical',
- 'name': 'ens2',
- 'subnets': [{'type': 'dhcp4'}]
- }
- ]
- }
- self.assertEqual(netcfg, resp)
-
- @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
- @mock.patch('cloudinit.util.get_cmdline')
- def test_network_config_ipv6_ok(self, m_get_cmdline, fallback_nic):
- """
- network_config will only generate IPv4/v6 configs if ipv6 data is
- available in the metadata
- """
- m_get_cmdline.return_value = 'scaleway'
- fallback_nic.return_value = 'ens2'
- self.datasource.metadata['ipv6'] = {
- 'address': '2000:abc:4444:9876::42:999',
- 'gateway': '2000:abc:4444:9876::42:000',
- 'netmask': '127',
- }
-
- netcfg = self.datasource.network_config
- resp = {
- 'version': 1,
- 'config': [
- {
- 'type': 'physical',
- 'name': 'ens2',
- 'subnets': [
- {
- 'type': 'dhcp4'
- },
- {
- 'type': 'static',
- 'address': '2000:abc:4444:9876::42:999',
- 'gateway': '2000:abc:4444:9876::42:000',
- 'netmask': '127',
- }
- ]
- }
- ]
- }
- self.assertEqual(netcfg, resp)
-
- @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
- @mock.patch('cloudinit.util.get_cmdline')
- def test_network_config_existing(self, m_get_cmdline, fallback_nic):
- """
- network_config() should return the same data if a network config
- already exists
- """
- m_get_cmdline.return_value = 'scaleway'
- self.datasource._network_config = '0xdeadbeef'
-
- netcfg = self.datasource.network_config
- self.assertEqual(netcfg, '0xdeadbeef')
-
- @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
- @mock.patch('cloudinit.util.get_cmdline')
- def test_network_config_unset(self, m_get_cmdline, fallback_nic):
- """
- _network_config will be set to sources.UNSET after the first boot.
-        Make sure it behaves correctly.
- """
- m_get_cmdline.return_value = 'scaleway'
- fallback_nic.return_value = 'ens2'
- self.datasource.metadata['ipv6'] = None
- self.datasource._network_config = sources.UNSET
-
- resp = {
- 'version': 1,
- 'config': [
- {
- 'type': 'physical',
- 'name': 'ens2',
- 'subnets': [{'type': 'dhcp4'}]
- }
- ]
- }
-
- netcfg = self.datasource.network_config
- self.assertEqual(netcfg, resp)
-
- @mock.patch('cloudinit.sources.DataSourceScaleway.LOG.warning')
- @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
- @mock.patch('cloudinit.util.get_cmdline')
- def test_network_config_cached_none(self, m_get_cmdline, fallback_nic,
- logwarning):
- """
- network_config() should return config data if cached data is None
- rather than sources.UNSET
- """
- m_get_cmdline.return_value = 'scaleway'
- fallback_nic.return_value = 'ens2'
- self.datasource.metadata['ipv6'] = None
- self.datasource._network_config = None
-
- resp = {
- 'version': 1,
- 'config': [
- {
- 'type': 'physical',
- 'name': 'ens2',
- 'subnets': [{'type': 'dhcp4'}]
- }
- ]
- }
-
- netcfg = self.datasource.network_config
- self.assertEqual(netcfg, resp)
- logwarning.assert_called_with('Found None as cached _network_config. '
- 'Resetting to %s', sources.UNSET)
diff --git a/tests/unittests/test_dhclient_hook.py b/tests/unittests/test_dhclient_hook.py
new file mode 100644
index 00000000..7e5b54c0
--- /dev/null
+++ b/tests/unittests/test_dhclient_hook.py
@@ -0,0 +1,112 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Tests for cloudinit.dhclient_hook."""
+
+import argparse
+import json
+import os
+from unittest import mock
+
+from cloudinit import dhclient_hook as dhc
+from tests.unittests.helpers import CiTestCase, dir2dict, populate_dir
+
+
+class TestDhclientHook(CiTestCase):
+
+ ex_env = {
+ "interface": "eth0",
+ "new_dhcp_lease_time": "3600",
+ "new_host_name": "x1",
+ "new_ip_address": "10.145.210.163",
+ "new_subnet_mask": "255.255.255.0",
+ "old_host_name": "x1",
+ "PATH": "/usr/sbin:/usr/bin:/sbin:/bin",
+ "pid": "614",
+ "reason": "BOUND",
+ }
+
+ # some older versions of dhclient put the same content,
+ # but in upper case with DHCP4_ instead of new_
+ ex_env_dhcp4 = {
+ "REASON": "BOUND",
+ "DHCP4_dhcp_lease_time": "3600",
+ "DHCP4_host_name": "x1",
+ "DHCP4_ip_address": "10.145.210.163",
+ "DHCP4_subnet_mask": "255.255.255.0",
+ "INTERFACE": "eth0",
+ "PATH": "/usr/sbin:/usr/bin:/sbin:/bin",
+ "pid": "614",
+ }
+
+ expected = {
+ "dhcp_lease_time": "3600",
+ "host_name": "x1",
+ "ip_address": "10.145.210.163",
+ "subnet_mask": "255.255.255.0",
+ }
+
+ def setUp(self):
+ super(TestDhclientHook, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def test_handle_args(self):
+        """Quick test of a call to handle_args."""
+ nic = "eth0"
+ args = argparse.Namespace(event=dhc.UP, interface=nic)
+ with mock.patch.dict("os.environ", clear=True, values=self.ex_env):
+ dhc.handle_args(dhc.NAME, args, data_d=self.tmp)
+ found = dir2dict(self.tmp + os.path.sep)
+ self.assertEqual([nic + ".json"], list(found.keys()))
+ self.assertEqual(self.expected, json.loads(found[nic + ".json"]))
+
+ def test_run_hook_up_creates_dir(self):
+ """If dir does not exist, run_hook should create it."""
+ subd = self.tmp_path("subdir", self.tmp)
+ nic = "eth1"
+ dhc.run_hook(nic, "up", data_d=subd, env=self.ex_env)
+ self.assertEqual(
+ set([nic + ".json"]), set(dir2dict(subd + os.path.sep))
+ )
+
+ def test_run_hook_up(self):
+ """Test expected use of run_hook_up."""
+ nic = "eth0"
+ dhc.run_hook(nic, "up", data_d=self.tmp, env=self.ex_env)
+ found = dir2dict(self.tmp + os.path.sep)
+ self.assertEqual([nic + ".json"], list(found.keys()))
+ self.assertEqual(self.expected, json.loads(found[nic + ".json"]))
+
+ def test_run_hook_up_dhcp4_prefix(self):
+ """Test run_hook filters correctly with older DHCP4_ data."""
+ nic = "eth0"
+ dhc.run_hook(nic, "up", data_d=self.tmp, env=self.ex_env_dhcp4)
+ found = dir2dict(self.tmp + os.path.sep)
+ self.assertEqual([nic + ".json"], list(found.keys()))
+ self.assertEqual(self.expected, json.loads(found[nic + ".json"]))
+
+ def test_run_hook_down_deletes(self):
+ """down should delete the created json file."""
+ nic = "eth1"
+ populate_dir(
+ self.tmp, {nic + ".json": "{'abcd'}", "myfile.txt": "text"}
+ )
+ dhc.run_hook(nic, "down", data_d=self.tmp, env={"old_host_name": "x1"})
+ self.assertEqual(
+ set(["myfile.txt"]), set(dir2dict(self.tmp + os.path.sep))
+ )
+
+ def test_get_parser(self):
+ """Smoke test creation of get_parser."""
+ # cloud-init main uses 'action'.
+ event, interface = (dhc.UP, "mynic0")
+ self.assertEqual(
+ argparse.Namespace(
+ event=event,
+ interface=interface,
+ action=(dhc.NAME, dhc.handle_args),
+ ),
+ dhc.get_parser().parse_args([event, interface]),
+ )
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_distros/test_arch.py b/tests/unittests/test_distros/test_arch.py
deleted file mode 100644
index a95ba3b5..00000000
--- a/tests/unittests/test_distros/test_arch.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.distros.arch import _render_network
-from cloudinit import util
-
-from cloudinit.tests.helpers import (CiTestCase, dir2dict)
-
-from . import _get_distro
-
-
-class TestArch(CiTestCase):
-
- def test_get_distro(self):
- distro = _get_distro("arch")
- hostname = "myhostname"
- hostfile = self.tmp_path("hostfile")
- distro._write_hostname(hostname, hostfile)
- self.assertEqual(hostname + "\n", util.load_file(hostfile))
-
-
-class TestRenderNetwork(CiTestCase):
- def test_basic_static(self):
- """Just the most basic static config.
-
- note 'lo' should not be rendered as an interface."""
- entries = {'eth0': {'auto': True,
- 'dns-nameservers': ['8.8.8.8'],
- 'bootproto': 'static',
- 'address': '10.0.0.2',
- 'gateway': '10.0.0.1',
- 'netmask': '255.255.255.0'},
- 'lo': {'auto': True}}
- target = self.tmp_dir()
- devs = _render_network(entries, target=target)
- files = dir2dict(target, prefix=target)
- self.assertEqual(['eth0'], devs)
- self.assertEqual(
- {'/etc/netctl/eth0': '\n'.join([
- "Address=10.0.0.2/255.255.255.0",
- "Connection=ethernet",
- "DNS=('8.8.8.8')",
- "Gateway=10.0.0.1",
- "IP=static",
- "Interface=eth0", ""]),
- '/etc/resolv.conf': 'nameserver 8.8.8.8\n'}, files)
diff --git a/tests/unittests/test_distros/test_bsd_utils.py b/tests/unittests/test_distros/test_bsd_utils.py
deleted file mode 100644
index 3a68f2a9..00000000
--- a/tests/unittests/test_distros/test_bsd_utils.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import cloudinit.distros.bsd_utils as bsd_utils
-
-from cloudinit.tests.helpers import (CiTestCase, ExitStack, mock)
-
-RC_FILE = """
-if something; then
- do something here
-fi
-hostname={hostname}
-"""
-
-
-class TestBsdUtils(CiTestCase):
-
- def setUp(self):
- super().setUp()
- patches = ExitStack()
- self.addCleanup(patches.close)
-
- self.load_file = patches.enter_context(
- mock.patch.object(bsd_utils.util, 'load_file'))
-
- self.write_file = patches.enter_context(
- mock.patch.object(bsd_utils.util, 'write_file'))
-
- def test_get_rc_config_value(self):
- self.load_file.return_value = 'hostname=foo\n'
- self.assertEqual(bsd_utils.get_rc_config_value('hostname'), 'foo')
- self.load_file.assert_called_with('/etc/rc.conf')
-
- self.load_file.return_value = 'hostname=foo'
- self.assertEqual(bsd_utils.get_rc_config_value('hostname'), 'foo')
-
- self.load_file.return_value = 'hostname="foo"'
- self.assertEqual(bsd_utils.get_rc_config_value('hostname'), 'foo')
-
- self.load_file.return_value = "hostname='foo'"
- self.assertEqual(bsd_utils.get_rc_config_value('hostname'), 'foo')
-
- self.load_file.return_value = 'hostname=\'foo"'
- self.assertEqual(bsd_utils.get_rc_config_value('hostname'), "'foo\"")
-
- self.load_file.return_value = ''
- self.assertEqual(bsd_utils.get_rc_config_value('hostname'), None)
-
- self.load_file.return_value = RC_FILE.format(hostname='foo')
- self.assertEqual(bsd_utils.get_rc_config_value('hostname'), "foo")
-
- def test_set_rc_config_value_unchanged(self):
- # Setting an already-present value should not rewrite /etc/rc.conf.
- self.load_file.return_value = RC_FILE.format(hostname='foo')
- bsd_utils.set_rc_config_value('hostname', 'foo')
-
- self.write_file.assert_not_called()
-
- def test_set_rc_config_value(self):
- bsd_utils.set_rc_config_value('hostname', 'foo')
- self.write_file.assert_called_with('/etc/rc.conf', 'hostname=foo\n')
-
- self.load_file.return_value = RC_FILE.format(hostname='foo')
- bsd_utils.set_rc_config_value('hostname', 'bar')
- self.write_file.assert_called_with(
- '/etc/rc.conf',
- RC_FILE.format(hostname='bar')
- )
diff --git a/tests/unittests/test_distros/test_create_users.py b/tests/unittests/test_distros/test_create_users.py
deleted file mode 100644
index 94ab052d..00000000
--- a/tests/unittests/test_distros/test_create_users.py
+++ /dev/null
@@ -1,271 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import re
-
-from cloudinit import distros
-from cloudinit import ssh_util
-from cloudinit.tests.helpers import (CiTestCase, mock)
-
-
-class MyBaseDistro(distros.Distro):
- # MyBaseDistro is here to test base Distro class implementations
-
- def __init__(self, name="basedistro", cfg=None, paths=None):
- if not cfg:
- cfg = {}
- if not paths:
- paths = {}
- super(MyBaseDistro, self).__init__(name, cfg, paths)
-
- def install_packages(self, pkglist):
- raise NotImplementedError()
-
- def _write_network(self, settings):
- raise NotImplementedError()
-
- def package_command(self, cmd, args=None, pkgs=None):
- raise NotImplementedError()
-
- def update_package_sources(self):
- raise NotImplementedError()
-
- def apply_locale(self, locale, out_fn=None):
- raise NotImplementedError()
-
- def set_timezone(self, tz):
- raise NotImplementedError()
-
- def _read_hostname(self, filename, default=None):
- raise NotImplementedError()
-
- def _write_hostname(self, hostname, filename):
- raise NotImplementedError()
-
- def _read_system_hostname(self):
- raise NotImplementedError()
-
-
-@mock.patch("cloudinit.distros.util.system_is_snappy", return_value=False)
-@mock.patch("cloudinit.distros.subp.subp")
-class TestCreateUser(CiTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestCreateUser, self).setUp()
- self.dist = MyBaseDistro()
-
- def _useradd2call(self, args):
- # return a mock call for the useradd command in args
- # with expected 'logstring'.
- args = ['useradd'] + args
- logcmd = [a for a in args]
- for i in range(len(args)):
- if args[i] in ('--password',):
- logcmd[i + 1] = 'REDACTED'
- return mock.call(args, logstring=logcmd)
-
- def test_basic(self, m_subp, m_is_snappy):
- user = 'foouser'
- self.dist.create_user(user)
- self.assertEqual(
- m_subp.call_args_list,
- [self._useradd2call([user, '-m']),
- mock.call(['passwd', '-l', user])])
-
- def test_no_home(self, m_subp, m_is_snappy):
- user = 'foouser'
- self.dist.create_user(user, no_create_home=True)
- self.assertEqual(
- m_subp.call_args_list,
- [self._useradd2call([user, '-M']),
- mock.call(['passwd', '-l', user])])
-
- def test_system_user(self, m_subp, m_is_snappy):
- # system user should have no home and get --system
- user = 'foouser'
- self.dist.create_user(user, system=True)
- self.assertEqual(
- m_subp.call_args_list,
- [self._useradd2call([user, '--system', '-M']),
- mock.call(['passwd', '-l', user])])
-
- def test_explicit_no_home_false(self, m_subp, m_is_snappy):
- user = 'foouser'
- self.dist.create_user(user, no_create_home=False)
- self.assertEqual(
- m_subp.call_args_list,
- [self._useradd2call([user, '-m']),
- mock.call(['passwd', '-l', user])])
-
- def test_unlocked(self, m_subp, m_is_snappy):
- user = 'foouser'
- self.dist.create_user(user, lock_passwd=False)
- self.assertEqual(
- m_subp.call_args_list,
- [self._useradd2call([user, '-m'])])
-
- def test_set_password(self, m_subp, m_is_snappy):
- user = 'foouser'
- password = 'passfoo'
- self.dist.create_user(user, passwd=password)
- self.assertEqual(
- m_subp.call_args_list,
- [self._useradd2call([user, '--password', password, '-m']),
- mock.call(['passwd', '-l', user])])
-
- @mock.patch("cloudinit.distros.util.is_group")
- def test_group_added(self, m_is_group, m_subp, m_is_snappy):
- m_is_group.return_value = False
- user = 'foouser'
- self.dist.create_user(user, groups=['group1'])
- expected = [
- mock.call(['groupadd', 'group1']),
- self._useradd2call([user, '--groups', 'group1', '-m']),
- mock.call(['passwd', '-l', user])]
- self.assertEqual(m_subp.call_args_list, expected)
-
- @mock.patch("cloudinit.distros.util.is_group")
- def test_only_new_group_added(self, m_is_group, m_subp, m_is_snappy):
- ex_groups = ['existing_group']
- groups = ['group1', ex_groups[0]]
- m_is_group.side_effect = lambda m: m in ex_groups
- user = 'foouser'
- self.dist.create_user(user, groups=groups)
- expected = [
- mock.call(['groupadd', 'group1']),
- self._useradd2call([user, '--groups', ','.join(groups), '-m']),
- mock.call(['passwd', '-l', user])]
- self.assertEqual(m_subp.call_args_list, expected)
-
- @mock.patch("cloudinit.distros.util.is_group")
- def test_create_groups_with_whitespace_string(
- self, m_is_group, m_subp, m_is_snappy):
- # groups are supported as a comma-delimited string, even with whitespace
- m_is_group.return_value = False
- user = 'foouser'
- self.dist.create_user(user, groups='group1, group2')
- expected = [
- mock.call(['groupadd', 'group1']),
- mock.call(['groupadd', 'group2']),
- self._useradd2call([user, '--groups', 'group1,group2', '-m']),
- mock.call(['passwd', '-l', user])]
- self.assertEqual(m_subp.call_args_list, expected)
-
- def test_explicit_sudo_false(self, m_subp, m_is_snappy):
- user = 'foouser'
- self.dist.create_user(user, sudo=False)
- self.assertEqual(
- m_subp.call_args_list,
- [self._useradd2call([user, '-m']),
- mock.call(['passwd', '-l', user])])
-
- @mock.patch('cloudinit.ssh_util.setup_user_keys')
- def test_setup_ssh_authorized_keys_with_string(
- self, m_setup_user_keys, m_subp, m_is_snappy):
- """ssh_authorized_keys allows string and calls setup_user_keys."""
- user = 'foouser'
- self.dist.create_user(user, ssh_authorized_keys='mykey')
- self.assertEqual(
- m_subp.call_args_list,
- [self._useradd2call([user, '-m']),
- mock.call(['passwd', '-l', user])])
- m_setup_user_keys.assert_called_once_with(set(['mykey']), user)
-
- @mock.patch('cloudinit.ssh_util.setup_user_keys')
- def test_setup_ssh_authorized_keys_with_list(
- self, m_setup_user_keys, m_subp, m_is_snappy):
- """ssh_authorized_keys allows lists and calls setup_user_keys."""
- user = 'foouser'
- self.dist.create_user(user, ssh_authorized_keys=['key1', 'key2'])
- self.assertEqual(
- m_subp.call_args_list,
- [self._useradd2call([user, '-m']),
- mock.call(['passwd', '-l', user])])
- m_setup_user_keys.assert_called_once_with(set(['key1', 'key2']), user)
-
- @mock.patch('cloudinit.ssh_util.setup_user_keys')
- def test_setup_ssh_authorized_keys_with_integer(
- self, m_setup_user_keys, m_subp, m_is_snappy):
- """ssh_authorized_keys warns on non-iterable/string type."""
- user = 'foouser'
- self.dist.create_user(user, ssh_authorized_keys=-1)
- m_setup_user_keys.assert_called_once_with(set([]), user)
- match = re.match(
- r'.*WARNING: Invalid type \'<(type|class) \'int\'>\' detected for'
- ' \'ssh_authorized_keys\'.*',
- self.logs.getvalue(),
- re.DOTALL)
- self.assertIsNotNone(
- match, 'Missing ssh_authorized_keys invalid type warning')
-
- @mock.patch('cloudinit.ssh_util.setup_user_keys')
- def test_create_user_with_ssh_redirect_user_no_cloud_keys(
- self, m_setup_user_keys, m_subp, m_is_snappy):
- """Log a warning when trying to redirect a user no cloud ssh keys."""
- user = 'foouser'
- self.dist.create_user(user, ssh_redirect_user='someuser')
- self.assertIn(
- 'WARNING: Unable to disable SSH logins for foouser given '
- 'ssh_redirect_user: someuser. No cloud public-keys present.\n',
- self.logs.getvalue())
- m_setup_user_keys.assert_not_called()
-
- @mock.patch('cloudinit.ssh_util.setup_user_keys')
- def test_create_user_with_ssh_redirect_user_with_cloud_keys(
- self, m_setup_user_keys, m_subp, m_is_snappy):
- """Disable ssh when ssh_redirect_user and cloud ssh keys are set."""
- user = 'foouser'
- self.dist.create_user(
- user, ssh_redirect_user='someuser', cloud_public_ssh_keys=['key1'])
- disable_prefix = ssh_util.DISABLE_USER_OPTS
- disable_prefix = disable_prefix.replace('$USER', 'someuser')
- disable_prefix = disable_prefix.replace('$DISABLE_USER', user)
- m_setup_user_keys.assert_called_once_with(
- set(['key1']), 'foouser', options=disable_prefix)
-
- @mock.patch('cloudinit.ssh_util.setup_user_keys')
- def test_create_user_with_ssh_redirect_user_does_not_disable_auth_keys(
- self, m_setup_user_keys, m_subp, m_is_snappy):
- """Do not disable ssh_authorized_keys when ssh_redirect_user is set."""
- user = 'foouser'
- self.dist.create_user(
- user, ssh_authorized_keys='auth1', ssh_redirect_user='someuser',
- cloud_public_ssh_keys=['key1'])
- disable_prefix = ssh_util.DISABLE_USER_OPTS
- disable_prefix = disable_prefix.replace('$USER', 'someuser')
- disable_prefix = disable_prefix.replace('$DISABLE_USER', user)
- self.assertEqual(
- m_setup_user_keys.call_args_list,
- [mock.call(set(['auth1']), user), # not disabled
- mock.call(set(['key1']), 'foouser', options=disable_prefix)])
-
- @mock.patch("cloudinit.distros.subp.which")
- def test_lock_with_usermod_if_no_passwd(self, m_which, m_subp,
- m_is_snappy):
- """Lock uses usermod --lock if no 'passwd' cmd available."""
- m_which.side_effect = lambda m: m in ('usermod',)
- self.dist.lock_passwd("bob")
- self.assertEqual(
- [mock.call(['usermod', '--lock', 'bob'])],
- m_subp.call_args_list)
-
- @mock.patch("cloudinit.distros.subp.which")
- def test_lock_with_passwd_if_available(self, m_which, m_subp,
- m_is_snappy):
- """Lock with only passwd will use passwd."""
- m_which.side_effect = lambda m: m in ('passwd',)
- self.dist.lock_passwd("bob")
- self.assertEqual(
- [mock.call(['passwd', '-l', 'bob'])],
- m_subp.call_args_list)
-
- @mock.patch("cloudinit.distros.subp.which")
- def test_lock_raises_runtime_if_no_commands(self, m_which, m_subp,
- m_is_snappy):
- """Lock with no commands available raises RuntimeError."""
- m_which.return_value = None
- with self.assertRaises(RuntimeError):
- self.dist.lock_passwd("bob")
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_distros/test_debian.py b/tests/unittests/test_distros/test_debian.py
deleted file mode 100644
index 7ff8240b..00000000
--- a/tests/unittests/test_distros/test_debian.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit import distros
-from cloudinit import util
-from cloudinit.tests.helpers import (FilesystemMockingTestCase, mock)
-
-
-@mock.patch("cloudinit.distros.debian.subp.subp")
-class TestDebianApplyLocale(FilesystemMockingTestCase):
-
- def setUp(self):
- super(TestDebianApplyLocale, self).setUp()
- self.new_root = self.tmp_dir()
- self.patchOS(self.new_root)
- self.patchUtils(self.new_root)
- self.spath = self.tmp_path('etc/default/locale', self.new_root)
- cls = distros.fetch("debian")
- self.distro = cls("debian", {}, None)
-
- def test_no_rerun(self, m_subp):
- """If system has defined locale, no re-run is expected."""
- m_subp.return_value = (None, None)
- locale = 'en_US.UTF-8'
- util.write_file(self.spath, 'LANG=%s\n' % locale, omode="w")
- self.distro.apply_locale(locale, out_fn=self.spath)
- m_subp.assert_not_called()
-
- def test_no_regen_on_c_utf8(self, m_subp):
- """If locale is set to C.UTF8, do not attempt to call locale-gen"""
- m_subp.return_value = (None, None)
- locale = 'C.UTF-8'
- util.write_file(self.spath, 'LANG=%s\n' % 'en_US.UTF-8', omode="w")
- self.distro.apply_locale(locale, out_fn=self.spath)
- self.assertEqual(
- [['update-locale', '--locale-file=' + self.spath,
- 'LANG=%s' % locale]],
- [p[0][0] for p in m_subp.call_args_list])
-
- def test_rerun_if_different(self, m_subp):
- """If system has different locale, locale-gen should be called."""
- m_subp.return_value = (None, None)
- locale = 'en_US.UTF-8'
- util.write_file(self.spath, 'LANG=fr_FR.UTF-8', omode="w")
- self.distro.apply_locale(locale, out_fn=self.spath)
- self.assertEqual(
- [['locale-gen', locale],
- ['update-locale', '--locale-file=' + self.spath,
- 'LANG=%s' % locale]],
- [p[0][0] for p in m_subp.call_args_list])
-
- def test_rerun_if_no_file(self, m_subp):
- """If system has no locale file, locale-gen should be called."""
- m_subp.return_value = (None, None)
- locale = 'en_US.UTF-8'
- self.distro.apply_locale(locale, out_fn=self.spath)
- self.assertEqual(
- [['locale-gen', locale],
- ['update-locale', '--locale-file=' + self.spath,
- 'LANG=%s' % locale]],
- [p[0][0] for p in m_subp.call_args_list])
-
- def test_rerun_on_unset_system_locale(self, m_subp):
- """If system has unset locale, locale-gen should be called."""
- m_subp.return_value = (None, None)
- locale = 'en_US.UTF-8'
- util.write_file(self.spath, 'LANG=', omode="w")
- self.distro.apply_locale(locale, out_fn=self.spath)
- self.assertEqual(
- [['locale-gen', locale],
- ['update-locale', '--locale-file=' + self.spath,
- 'LANG=%s' % locale]],
- [p[0][0] for p in m_subp.call_args_list])
-
- def test_rerun_on_mismatched_keys(self, m_subp):
- """If key is LC_ALL and system has only LANG, rerun is expected."""
- m_subp.return_value = (None, None)
- locale = 'en_US.UTF-8'
- util.write_file(self.spath, 'LANG=', omode="w")
- self.distro.apply_locale(locale, out_fn=self.spath, keyname='LC_ALL')
- self.assertEqual(
- [['locale-gen', locale],
- ['update-locale', '--locale-file=' + self.spath,
- 'LC_ALL=%s' % locale]],
- [p[0][0] for p in m_subp.call_args_list])
-
- def test_falseish_locale_raises_valueerror(self, m_subp):
- """locale as None or "" is invalid and should raise ValueError."""
-
- with self.assertRaises(ValueError) as ctext_m:
- self.distro.apply_locale(None)
- m_subp.assert_not_called()
-
- self.assertEqual(
- 'Failed to provide locale value.', str(ctext_m.exception))
-
- with self.assertRaises(ValueError) as ctext_m:
- self.distro.apply_locale("")
- m_subp.assert_not_called()
- self.assertEqual(
- 'Failed to provide locale value.', str(ctext_m.exception))
diff --git a/tests/unittests/test_distros/test_generic.py b/tests/unittests/test_distros/test_generic.py
deleted file mode 100644
index 44607489..00000000
--- a/tests/unittests/test_distros/test_generic.py
+++ /dev/null
@@ -1,302 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit import distros
-from cloudinit import util
-
-from cloudinit.tests import helpers
-
-import os
-import pytest
-import shutil
-import tempfile
-from unittest import mock
-
-unknown_arch_info = {
- 'arches': ['default'],
- 'failsafe': {'primary': 'http://fs-primary-default',
- 'security': 'http://fs-security-default'}
-}
-
-package_mirrors = [
- {'arches': ['i386', 'amd64'],
- 'failsafe': {'primary': 'http://fs-primary-intel',
- 'security': 'http://fs-security-intel'},
- 'search': {
- 'primary': ['http://%(ec2_region)s.ec2/',
- 'http://%(availability_zone)s.clouds/'],
- 'security': ['http://security-mirror1-intel',
- 'http://security-mirror2-intel']}},
- {'arches': ['armhf', 'armel'],
- 'failsafe': {'primary': 'http://fs-primary-arm',
- 'security': 'http://fs-security-arm'}},
- unknown_arch_info
-]
-
-gpmi = distros._get_package_mirror_info
-gapmi = distros._get_arch_package_mirror_info
-
-
-class TestGenericDistro(helpers.FilesystemMockingTestCase):
-
- def setUp(self):
- super(TestGenericDistro, self).setUp()
- # Make a temp directory for tests to use.
- self.tmp = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.tmp)
-
- def _write_load_sudoers(self, _user, rules):
- cls = distros.fetch("ubuntu")
- d = cls("ubuntu", {}, None)
- os.makedirs(os.path.join(self.tmp, "etc"))
- os.makedirs(os.path.join(self.tmp, "etc", 'sudoers.d'))
- self.patchOS(self.tmp)
- self.patchUtils(self.tmp)
- d.write_sudo_rules("harlowja", rules)
- contents = util.load_file(d.ci_sudoers_fn)
- return contents
-
- def _count_in(self, lines_look_for, text_content):
- found_amount = 0
- for e in lines_look_for:
- for line in text_content.splitlines():
- line = line.strip()
- if line == e:
- found_amount += 1
- return found_amount
-
- def test_sudoers_ensure_rules(self):
- rules = 'ALL=(ALL:ALL) ALL'
- contents = self._write_load_sudoers('harlowja', rules)
- expected = ['harlowja ALL=(ALL:ALL) ALL']
- self.assertEqual(len(expected), self._count_in(expected, contents))
- not_expected = [
- 'harlowja A',
- 'harlowja L',
- 'harlowja L',
- ]
- self.assertEqual(0, self._count_in(not_expected, contents))
-
- def test_sudoers_ensure_rules_list(self):
- rules = [
- 'ALL=(ALL:ALL) ALL',
- 'B-ALL=(ALL:ALL) ALL',
- 'C-ALL=(ALL:ALL) ALL',
- ]
- contents = self._write_load_sudoers('harlowja', rules)
- expected = [
- 'harlowja ALL=(ALL:ALL) ALL',
- 'harlowja B-ALL=(ALL:ALL) ALL',
- 'harlowja C-ALL=(ALL:ALL) ALL',
- ]
- self.assertEqual(len(expected), self._count_in(expected, contents))
- not_expected = [
- 'harlowja A',
- 'harlowja L',
- 'harlowja L',
- ]
- self.assertEqual(0, self._count_in(not_expected, contents))
-
- def test_sudoers_ensure_new(self):
- cls = distros.fetch("ubuntu")
- d = cls("ubuntu", {}, None)
- self.patchOS(self.tmp)
- self.patchUtils(self.tmp)
- d.ensure_sudo_dir("/b")
- contents = util.load_file("/etc/sudoers")
- self.assertIn("includedir /b", contents)
- self.assertTrue(os.path.isdir("/b"))
-
- def test_sudoers_ensure_append(self):
- cls = distros.fetch("ubuntu")
- d = cls("ubuntu", {}, None)
- self.patchOS(self.tmp)
- self.patchUtils(self.tmp)
- util.write_file("/etc/sudoers", "josh, josh\n")
- d.ensure_sudo_dir("/b")
- contents = util.load_file("/etc/sudoers")
- self.assertIn("includedir /b", contents)
- self.assertTrue(os.path.isdir("/b"))
- self.assertIn("josh", contents)
- self.assertEqual(2, contents.count("josh"))
-
- def test_arch_package_mirror_info_unknown(self):
- """for an unknown arch, we should get back that with arch 'default'."""
- arch_mirrors = gapmi(package_mirrors, arch="unknown")
- self.assertEqual(unknown_arch_info, arch_mirrors)
-
- def test_arch_package_mirror_info_known(self):
- arch_mirrors = gapmi(package_mirrors, arch="amd64")
- self.assertEqual(package_mirrors[0], arch_mirrors)
-
- def test_systemd_in_use(self):
- cls = distros.fetch("ubuntu")
- d = cls("ubuntu", {}, None)
- self.patchOS(self.tmp)
- self.patchUtils(self.tmp)
- os.makedirs('/run/systemd/system')
- self.assertTrue(d.uses_systemd())
-
- def test_systemd_not_in_use(self):
- cls = distros.fetch("ubuntu")
- d = cls("ubuntu", {}, None)
- self.patchOS(self.tmp)
- self.patchUtils(self.tmp)
- self.assertFalse(d.uses_systemd())
-
- def test_systemd_symlink(self):
- cls = distros.fetch("ubuntu")
- d = cls("ubuntu", {}, None)
- self.patchOS(self.tmp)
- self.patchUtils(self.tmp)
- os.makedirs('/run/systemd')
- os.symlink('/', '/run/systemd/system')
- self.assertFalse(d.uses_systemd())
-
- @mock.patch('cloudinit.distros.debian.read_system_locale')
- def test_get_locale_ubuntu(self, m_locale):
- """Test ubuntu distro returns locale set to C.UTF-8"""
- m_locale.return_value = 'C.UTF-8'
- cls = distros.fetch("ubuntu")
- d = cls("ubuntu", {}, None)
- locale = d.get_locale()
- self.assertEqual('C.UTF-8', locale)
-
- def test_get_locale_rhel(self):
- """Test rhel distro returns NotImplementedError exception"""
- cls = distros.fetch("rhel")
- d = cls("rhel", {}, None)
- with self.assertRaises(NotImplementedError):
- d.get_locale()
-
- def test_expire_passwd_uses_chpasswd(self):
- """Test ubuntu.expire_passwd uses the passwd command."""
- for d_name in ("ubuntu", "rhel"):
- cls = distros.fetch(d_name)
- d = cls(d_name, {}, None)
- with mock.patch("cloudinit.subp.subp") as m_subp:
- d.expire_passwd("myuser")
- m_subp.assert_called_once_with(["passwd", "--expire", "myuser"])
-
- def test_expire_passwd_freebsd_uses_pw_command(self):
- """Test FreeBSD.expire_passwd uses the pw command."""
- cls = distros.fetch("freebsd")
- d = cls("freebsd", {}, None)
- with mock.patch("cloudinit.subp.subp") as m_subp:
- d.expire_passwd("myuser")
- m_subp.assert_called_once_with(
- ["pw", "usermod", "myuser", "-p", "01-Jan-1970"])
-
-
-class TestGetPackageMirrors:
-
- def return_first(self, mlist):
- if not mlist:
- return None
- return mlist[0]
-
- def return_second(self, mlist):
- if not mlist:
- return None
-
- return mlist[1] if len(mlist) > 1 else None
-
- def return_none(self, _mlist):
- return None
-
- def return_last(self, mlist):
- if not mlist:
- return None
- return(mlist[-1])
-
- @pytest.mark.parametrize(
- "allow_ec2_mirror, platform_type, mirrors",
- [
- (True, "ec2", [
- {'primary': 'http://us-east-1.ec2/',
- 'security': 'http://security-mirror1-intel'},
- {'primary': 'http://us-east-1a.clouds/',
- 'security': 'http://security-mirror2-intel'}
- ]),
- (True, "other", [
- {'primary': 'http://us-east-1.ec2/',
- 'security': 'http://security-mirror1-intel'},
- {'primary': 'http://us-east-1a.clouds/',
- 'security': 'http://security-mirror2-intel'}
- ]),
- (False, "ec2", [
- {'primary': 'http://us-east-1.ec2/',
- 'security': 'http://security-mirror1-intel'},
- {'primary': 'http://us-east-1a.clouds/',
- 'security': 'http://security-mirror2-intel'}
- ]),
- (False, "other", [
- {'primary': 'http://us-east-1a.clouds/',
- 'security': 'http://security-mirror1-intel'},
- {'primary': 'http://fs-primary-intel',
- 'security': 'http://security-mirror2-intel'}
- ])
- ])
- def test_get_package_mirror_info_az_ec2(self,
- allow_ec2_mirror,
- platform_type,
- mirrors):
- flag_path = "cloudinit.distros." \
- "ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES"
- with mock.patch(flag_path, allow_ec2_mirror):
- arch_mirrors = gapmi(package_mirrors, arch="amd64")
- data_source_mock = mock.Mock(
- availability_zone="us-east-1a",
- platform_type=platform_type)
-
- results = gpmi(arch_mirrors, data_source=data_source_mock,
- mirror_filter=self.return_first)
- assert(results == mirrors[0])
-
- results = gpmi(arch_mirrors, data_source=data_source_mock,
- mirror_filter=self.return_second)
- assert(results == mirrors[1])
-
- results = gpmi(arch_mirrors, data_source=data_source_mock,
- mirror_filter=self.return_none)
- assert(results == package_mirrors[0]['failsafe'])
-
- def test_get_package_mirror_info_az_non_ec2(self):
- arch_mirrors = gapmi(package_mirrors, arch="amd64")
- data_source_mock = mock.Mock(availability_zone="nova.cloudvendor")
-
- results = gpmi(arch_mirrors, data_source=data_source_mock,
- mirror_filter=self.return_first)
- assert(results == {
- 'primary': 'http://nova.cloudvendor.clouds/',
- 'security': 'http://security-mirror1-intel'}
- )
-
- results = gpmi(arch_mirrors, data_source=data_source_mock,
- mirror_filter=self.return_last)
- assert(results == {
- 'primary': 'http://nova.cloudvendor.clouds/',
- 'security': 'http://security-mirror2-intel'}
- )
-
- def test_get_package_mirror_info_none(self):
- arch_mirrors = gapmi(package_mirrors, arch="amd64")
- data_source_mock = mock.Mock(availability_zone=None)
-
- # Because both search entries here rely on availability-zone
- # replacement, the filter will be called with an empty list and
- # the failsafe should be used.
- results = gpmi(arch_mirrors, data_source=data_source_mock,
- mirror_filter=self.return_first)
- assert(results == {
- 'primary': 'http://fs-primary-intel',
- 'security': 'http://security-mirror1-intel'}
- )
-
- results = gpmi(arch_mirrors, data_source=data_source_mock,
- mirror_filter=self.return_last)
- assert(results == {
- 'primary': 'http://fs-primary-intel',
- 'security': 'http://security-mirror2-intel'}
- )
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_distros/test_hosts.py b/tests/unittests/test_distros/test_hosts.py
deleted file mode 100644
index 8aaa6e48..00000000
--- a/tests/unittests/test_distros/test_hosts.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import unittest
-
-from cloudinit.distros.parsers import hosts
-
-
-BASE_ETC = '''
-# Example
-127.0.0.1 localhost
-192.168.1.10 foo.mydomain.org foo
-192.168.1.10 bar.mydomain.org bar
-146.82.138.7 master.debian.org master
-209.237.226.90 www.opensource.org
-'''
-BASE_ETC = BASE_ETC.strip()
-
-
-class TestHostsHelper(unittest.TestCase):
- def test_parse(self):
- eh = hosts.HostsConf(BASE_ETC)
- self.assertEqual(eh.get_entry('127.0.0.1'), [['localhost']])
- self.assertEqual(eh.get_entry('192.168.1.10'),
- [['foo.mydomain.org', 'foo'],
- ['bar.mydomain.org', 'bar']])
- eh = str(eh)
- self.assertTrue(eh.startswith('# Example'))
-
- def test_add(self):
- eh = hosts.HostsConf(BASE_ETC)
- eh.add_entry('127.0.0.0', 'blah')
- self.assertEqual(eh.get_entry('127.0.0.0'), [['blah']])
- eh.add_entry('127.0.0.3', 'blah', 'blah2', 'blah3')
- self.assertEqual(eh.get_entry('127.0.0.3'),
- [['blah', 'blah2', 'blah3']])
-
- def test_del(self):
- eh = hosts.HostsConf(BASE_ETC)
- eh.add_entry('127.0.0.0', 'blah')
- self.assertEqual(eh.get_entry('127.0.0.0'), [['blah']])
-
- eh.del_entries('127.0.0.0')
- self.assertEqual(eh.get_entry('127.0.0.0'), [])
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_distros/test_user_data_normalize.py b/tests/unittests/test_distros/test_user_data_normalize.py
deleted file mode 100644
index fa48410a..00000000
--- a/tests/unittests/test_distros/test_user_data_normalize.py
+++ /dev/null
@@ -1,374 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from unittest import mock
-
-from cloudinit import distros
-from cloudinit.distros import ug_util
-from cloudinit import helpers
-from cloudinit import settings
-
-from cloudinit.tests.helpers import TestCase
-
-
-bcfg = {
- 'name': 'bob',
- 'plain_text_passwd': 'ubuntu',
- 'home': "/home/ubuntu",
- 'shell': "/bin/bash",
- 'lock_passwd': True,
- 'gecos': "Ubuntu",
- 'groups': ["foo"]
-}
-
-
-class TestUGNormalize(TestCase):
-
- def setUp(self):
- super(TestUGNormalize, self).setUp()
- self.add_patch('cloudinit.util.system_is_snappy', 'm_snappy')
- self.add_patch('cloudinit.util.system_info', 'm_sysinfo')
- self.m_sysinfo.return_value = {'dist': ('Distro', '99.1', 'Codename')}
-
- def _make_distro(self, dtype, def_user=None):
- cfg = dict(settings.CFG_BUILTIN)
- cfg['system_info']['distro'] = dtype
- paths = helpers.Paths(cfg['system_info']['paths'])
- distro_cls = distros.fetch(dtype)
- if def_user:
- cfg['system_info']['default_user'] = def_user.copy()
- distro = distro_cls(dtype, cfg['system_info'], paths)
- return distro
-
- def _norm(self, cfg, distro):
- return ug_util.normalize_users_groups(cfg, distro)
-
- def test_group_dict(self):
- distro = self._make_distro('ubuntu')
- g = {'groups':
- [{'ubuntu': ['foo', 'bar'],
- 'bob': 'users'},
- 'cloud-users',
- {'bob': 'users2'}]}
- (_users, groups) = self._norm(g, distro)
- self.assertIn('ubuntu', groups)
- ub_members = groups['ubuntu']
- self.assertEqual(sorted(['foo', 'bar']), sorted(ub_members))
- self.assertIn('bob', groups)
- b_members = groups['bob']
- self.assertEqual(sorted(['users', 'users2']),
- sorted(b_members))
-
- def test_basic_groups(self):
- distro = self._make_distro('ubuntu')
- ug_cfg = {
- 'groups': ['bob'],
- }
- (users, groups) = self._norm(ug_cfg, distro)
- self.assertIn('bob', groups)
- self.assertEqual({}, users)
-
- def test_csv_groups(self):
- distro = self._make_distro('ubuntu')
- ug_cfg = {
- 'groups': 'bob,joe,steve',
- }
- (users, groups) = self._norm(ug_cfg, distro)
- self.assertIn('bob', groups)
- self.assertIn('joe', groups)
- self.assertIn('steve', groups)
- self.assertEqual({}, users)
-
- def test_more_groups(self):
- distro = self._make_distro('ubuntu')
- ug_cfg = {
- 'groups': ['bob', 'joe', 'steve']
- }
- (users, groups) = self._norm(ug_cfg, distro)
- self.assertIn('bob', groups)
- self.assertIn('joe', groups)
- self.assertIn('steve', groups)
- self.assertEqual({}, users)
-
- def test_member_groups(self):
- distro = self._make_distro('ubuntu')
- ug_cfg = {
- 'groups': {
- 'bob': ['s'],
- 'joe': [],
- 'steve': [],
- }
- }
- (users, groups) = self._norm(ug_cfg, distro)
- self.assertIn('bob', groups)
- self.assertEqual(['s'], groups['bob'])
- self.assertEqual([], groups['joe'])
- self.assertIn('joe', groups)
- self.assertIn('steve', groups)
- self.assertEqual({}, users)
-
- def test_users_simple_dict(self):
- distro = self._make_distro('ubuntu', bcfg)
- ug_cfg = {
- 'users': {
- 'default': True,
- }
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('bob', users)
- ug_cfg = {
- 'users': {
- 'default': 'yes',
- }
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('bob', users)
- ug_cfg = {
- 'users': {
- 'default': '1',
- }
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('bob', users)
-
- def test_users_simple_dict_no(self):
- distro = self._make_distro('ubuntu', bcfg)
- ug_cfg = {
- 'users': {
- 'default': False,
- }
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertEqual({}, users)
- ug_cfg = {
- 'users': {
- 'default': 'no',
- }
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertEqual({}, users)
-
- def test_users_simple_csv(self):
- distro = self._make_distro('ubuntu')
- ug_cfg = {
- 'users': 'joe,bob',
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('joe', users)
- self.assertIn('bob', users)
- self.assertEqual({'default': False}, users['joe'])
- self.assertEqual({'default': False}, users['bob'])
-
- def test_users_simple(self):
- distro = self._make_distro('ubuntu')
- ug_cfg = {
- 'users': [
- 'joe',
- 'bob'
- ],
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('joe', users)
- self.assertIn('bob', users)
- self.assertEqual({'default': False}, users['joe'])
- self.assertEqual({'default': False}, users['bob'])
-
- def test_users_old_user(self):
- distro = self._make_distro('ubuntu', bcfg)
- ug_cfg = {
- 'user': 'zetta',
- 'users': 'default'
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertNotIn('bob', users) # Bob is not the default now, zetta is
- self.assertIn('zetta', users)
- self.assertTrue(users['zetta']['default'])
- self.assertNotIn('default', users)
- ug_cfg = {
- 'user': 'zetta',
- 'users': 'default, joe'
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertNotIn('bob', users) # Bob is not the default now, zetta is
- self.assertIn('joe', users)
- self.assertIn('zetta', users)
- self.assertTrue(users['zetta']['default'])
- self.assertNotIn('default', users)
- ug_cfg = {
- 'user': 'zetta',
- 'users': ['bob', 'joe']
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('bob', users)
- self.assertIn('joe', users)
- self.assertIn('zetta', users)
- self.assertTrue(users['zetta']['default'])
- ug_cfg = {
- 'user': 'zetta',
- 'users': {
- 'bob': True,
- 'joe': True,
- }
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('bob', users)
- self.assertIn('joe', users)
- self.assertIn('zetta', users)
- self.assertTrue(users['zetta']['default'])
- ug_cfg = {
- 'user': 'zetta',
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('zetta', users)
- ug_cfg = {}
- (users, groups) = self._norm(ug_cfg, distro)
- self.assertEqual({}, users)
- self.assertEqual({}, groups)
-
- def test_users_dict_default_additional(self):
- distro = self._make_distro('ubuntu', bcfg)
- ug_cfg = {
- 'users': [
- {'name': 'default', 'blah': True}
- ],
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('bob', users)
- self.assertEqual(",".join(distro.get_default_user()['groups']),
- users['bob']['groups'])
- self.assertEqual(True, users['bob']['blah'])
- self.assertEqual(True, users['bob']['default'])
-
- def test_users_dict_extract(self):
- distro = self._make_distro('ubuntu', bcfg)
- ug_cfg = {
- 'users': [
- 'default',
- ],
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('bob', users)
- (name, config) = ug_util.extract_default(users)
- self.assertEqual(name, 'bob')
- expected_config = {}
- def_config = None
- try:
- def_config = distro.get_default_user()
- except NotImplementedError:
- pass
- if not def_config:
- def_config = {}
- expected_config.update(def_config)
-
- # Ignore these for now
- expected_config.pop('name', None)
- expected_config.pop('groups', None)
- config.pop('groups', None)
- self.assertEqual(config, expected_config)
-
- def test_users_dict_default(self):
- distro = self._make_distro('ubuntu', bcfg)
- ug_cfg = {
- 'users': [
- 'default',
- ],
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('bob', users)
- self.assertEqual(",".join(distro.get_default_user()['groups']),
- users['bob']['groups'])
- self.assertEqual(True, users['bob']['default'])
-
- def test_users_dict_trans(self):
- distro = self._make_distro('ubuntu')
- ug_cfg = {
- 'users': [
- {'name': 'joe',
- 'tr-me': True},
- {'name': 'bob'},
- ],
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('joe', users)
- self.assertIn('bob', users)
- self.assertEqual({'tr_me': True, 'default': False}, users['joe'])
- self.assertEqual({'default': False}, users['bob'])
-
- def test_users_dict(self):
- distro = self._make_distro('ubuntu')
- ug_cfg = {
- 'users': [
- {'name': 'joe'},
- {'name': 'bob'},
- ],
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- self.assertIn('joe', users)
- self.assertIn('bob', users)
- self.assertEqual({'default': False}, users['joe'])
- self.assertEqual({'default': False}, users['bob'])
-
- @mock.patch('cloudinit.subp.subp')
- def test_create_snap_user(self, mock_subp):
- mock_subp.side_effect = [('{"username": "joe", "ssh-key-count": 1}\n',
- '')]
- distro = self._make_distro('ubuntu')
- ug_cfg = {
- 'users': [
- {'name': 'joe', 'snapuser': 'joe@joe.com'},
- ],
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- for (user, config) in users.items():
- print('user=%s config=%s' % (user, config))
- username = distro.create_user(user, **config)
-
- snapcmd = ['snap', 'create-user', '--sudoer', '--json', 'joe@joe.com']
- mock_subp.assert_called_with(snapcmd, capture=True, logstring=snapcmd)
- self.assertEqual(username, 'joe')
-
- @mock.patch('cloudinit.subp.subp')
- def test_create_snap_user_known(self, mock_subp):
- mock_subp.side_effect = [('{"username": "joe", "ssh-key-count": 1}\n',
- '')]
- distro = self._make_distro('ubuntu')
- ug_cfg = {
- 'users': [
- {'name': 'joe', 'snapuser': 'joe@joe.com', 'known': True},
- ],
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- for (user, config) in users.items():
- print('user=%s config=%s' % (user, config))
- username = distro.create_user(user, **config)
-
- snapcmd = ['snap', 'create-user', '--sudoer', '--json', '--known',
- 'joe@joe.com']
- mock_subp.assert_called_with(snapcmd, capture=True, logstring=snapcmd)
- self.assertEqual(username, 'joe')
-
- @mock.patch('cloudinit.util.system_is_snappy')
- @mock.patch('cloudinit.util.is_group')
- @mock.patch('cloudinit.subp.subp')
- def test_add_user_on_snappy_system(self, mock_subp, mock_isgrp,
- mock_snappy):
- mock_isgrp.return_value = False
- mock_subp.return_value = True
- mock_snappy.return_value = True
- distro = self._make_distro('ubuntu')
- ug_cfg = {
- 'users': [
- {'name': 'joe', 'groups': 'users', 'create_groups': True},
- ],
- }
- (users, _groups) = self._norm(ug_cfg, distro)
- for (user, config) in users.items():
- print('user=%s config=%s' % (user, config))
- distro.add_user(user, **config)
-
- groupcmd = ['groupadd', 'users', '--extrausers']
- addcmd = ['useradd', 'joe', '--extrausers', '--groups', 'users', '-m']
-
- mock_subp.assert_any_call(groupcmd)
- mock_subp.assert_any_call(addcmd, logstring=addcmd)
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_dmi.py b/tests/unittests/test_dmi.py
new file mode 100644
index 00000000..6c28724a
--- /dev/null
+++ b/tests/unittests/test_dmi.py
@@ -0,0 +1,168 @@
+import os
+import shutil
+import tempfile
+from unittest import mock
+
+from cloudinit import dmi, subp, util
+from tests.unittests import helpers
+
+
+class TestReadDMIData(helpers.FilesystemMockingTestCase):
+ def setUp(self):
+ super(TestReadDMIData, self).setUp()
+ self.new_root = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, self.new_root)
+ self.reRoot(self.new_root)
+ p = mock.patch("cloudinit.dmi.is_container", return_value=False)
+ self.addCleanup(p.stop)
+ self._m_is_container = p.start()
+ p = mock.patch("cloudinit.dmi.is_FreeBSD", return_value=False)
+ self.addCleanup(p.stop)
+ self._m_is_FreeBSD = p.start()
+
+ def _create_sysfs_parent_directory(self):
+ util.ensure_dir(os.path.join("sys", "class", "dmi", "id"))
+
+ def _create_sysfs_file(self, key, content):
+ """Mocks the sys path found on Linux systems."""
+ self._create_sysfs_parent_directory()
+ dmi_key = "/sys/class/dmi/id/{0}".format(key)
+ util.write_file(dmi_key, content)
+
+ def _configure_dmidecode_return(self, key, content, error=None):
+ """
+ To exercise a missing sysfs path and the fallback to dmidecode, this
+ function fakes the output returned by dmidecode.
+ """
+
+ def _dmidecode_subp(cmd):
+ if cmd[-1] != key:
+ raise subp.ProcessExecutionError()
+ return (content, error)
+
+ self.patched_funcs.enter_context(
+ mock.patch("cloudinit.dmi.subp.which", side_effect=lambda _: True)
+ )
+ self.patched_funcs.enter_context(
+ mock.patch("cloudinit.dmi.subp.subp", side_effect=_dmidecode_subp)
+ )
+
+ def _configure_kenv_return(self, key, content, error=None):
+ """
+ To exercise the kenv callouts made on a FreeBSD system, this
+ function fakes the output returned by kenv.
+ """
+
+ def _kenv_subp(cmd):
+ if cmd[-1] != dmi.DMIDECODE_TO_KERNEL[key].freebsd:
+ raise subp.ProcessExecutionError()
+ return (content, error)
+
+ self.patched_funcs.enter_context(
+ mock.patch("cloudinit.dmi.subp.subp", side_effect=_kenv_subp)
+ )
+
+ def patch_mapping(self, new_mapping):
+ self.patched_funcs.enter_context(
+ mock.patch("cloudinit.dmi.DMIDECODE_TO_KERNEL", new_mapping)
+ )
+
+ def test_sysfs_used_with_key_in_mapping_and_file_on_disk(self):
+ self.patch_mapping({"mapped-key": dmi.kdmi("mapped-value", None)})
+ expected_dmi_value = "sys-used-correctly"
+ self._create_sysfs_file("mapped-value", expected_dmi_value)
+ self._configure_dmidecode_return("mapped-key", "wrong-wrong-wrong")
+ self.assertEqual(expected_dmi_value, dmi.read_dmi_data("mapped-key"))
+
+ def test_dmidecode_used_if_no_sysfs_file_on_disk(self):
+ self.patch_mapping({})
+ self._create_sysfs_parent_directory()
+ expected_dmi_value = "dmidecode-used"
+ self._configure_dmidecode_return("use-dmidecode", expected_dmi_value)
+ with mock.patch("cloudinit.util.os.uname") as m_uname:
+ m_uname.return_value = (
+ "x-sysname",
+ "x-nodename",
+ "x-release",
+ "x-version",
+ "x86_64",
+ )
+ self.assertEqual(
+ expected_dmi_value, dmi.read_dmi_data("use-dmidecode")
+ )
+
+ def test_dmidecode_not_used_on_arm(self):
+ self.patch_mapping({})
+ print("current =%s", subp)
+ self._create_sysfs_parent_directory()
+ dmi_val = "from-dmidecode"
+ dmi_name = "use-dmidecode"
+ self._configure_dmidecode_return(dmi_name, dmi_val)
+ print("now =%s", subp)
+
+ expected = {"armel": None, "aarch64": dmi_val, "x86_64": dmi_val}
+ found = {}
+ # We do not run the 'dmidecode' binary on some arches, so
+ # verify that anything requested that is not in the sysfs dir
+ # will return None on those arches.
+ with mock.patch("cloudinit.util.os.uname") as m_uname:
+ for arch in expected:
+ m_uname.return_value = (
+ "x-sysname",
+ "x-nodename",
+ "x-release",
+ "x-version",
+ arch,
+ )
+ print("now2 =%s", subp)
+ found[arch] = dmi.read_dmi_data(dmi_name)
+ self.assertEqual(expected, found)
+
+ def test_none_returned_if_neither_source_has_data(self):
+ self.patch_mapping({})
+ self._configure_dmidecode_return("key", "value")
+ self.assertIsNone(dmi.read_dmi_data("expect-fail"))
+
+ def test_none_returned_if_dmidecode_not_in_path(self):
+ self.patched_funcs.enter_context(
+ mock.patch.object(subp, "which", lambda _: False)
+ )
+ self.patch_mapping({})
+ self.assertIsNone(dmi.read_dmi_data("expect-fail"))
+
+ def test_empty_string_returned_instead_of_foxfox(self):
+ # uninitialized dmi values show as \xff, return empty string
+ my_len = 32
+ dmi_value = b"\xff" * my_len + b"\n"
+ expected = ""
+ dmi_key = "system-product-name"
+ sysfs_key = "product_name"
+ self._create_sysfs_file(sysfs_key, dmi_value)
+ self.assertEqual(expected, dmi.read_dmi_data(dmi_key))
+
+ def test_container_returns_none(self):
+ """In a container read_dmi_data should always return None."""
+
+ # first verify we get the value if not in container
+ self._m_is_container.return_value = False
+ key, val = ("system-product-name", "my_product")
+ self._create_sysfs_file("product_name", val)
+ self.assertEqual(val, dmi.read_dmi_data(key))
+
+ # then verify in container returns None
+ self._m_is_container.return_value = True
+ self.assertIsNone(dmi.read_dmi_data(key))
+
+ def test_container_returns_none_on_unknown(self):
+ """In a container even bogus keys return None."""
+ self._m_is_container.return_value = True
+ self._create_sysfs_file("product_name", "should-be-ignored")
+ self.assertIsNone(dmi.read_dmi_data("bogus"))
+ self.assertIsNone(dmi.read_dmi_data("system-product-name"))
+
+ def test_freebsd_uses_kenv(self):
+ """On a FreeBSD system, kenv is called."""
+ self._m_is_FreeBSD.return_value = True
+ key, val = ("system-product-name", "my_product")
+ self._configure_kenv_return(key, val)
+ self.assertEqual(dmi.read_dmi_data(key), val)
diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py
index 1d8aaf18..0b0de395 100644
--- a/tests/unittests/test_ds_identify.py
+++ b/tests/unittests/test_ds_identify.py
@@ -1,27 +1,34 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from collections import namedtuple
import copy
import os
+from collections import namedtuple
from uuid import uuid4
-from cloudinit import safeyaml
-from cloudinit import subp
-from cloudinit import util
-from cloudinit.tests.helpers import (
- CiTestCase, dir2dict, populate_dir, populate_dir_with_ts)
-
+from cloudinit import safeyaml, subp, util
from cloudinit.sources import DataSourceIBMCloud as ds_ibm
-from cloudinit.sources import DataSourceSmartOS as ds_smartos
from cloudinit.sources import DataSourceOracle as ds_oracle
-
-UNAME_MYSYS = ("Linux bart 4.4.0-62-generic #83-Ubuntu "
- "SMP Wed Jan 18 14:10:15 UTC 2017 x86_64 GNU/Linux")
-UNAME_PPC64EL = ("Linux diamond 4.4.0-83-generic #106-Ubuntu SMP "
- "Mon Jun 26 17:53:54 UTC 2017 "
- "ppc64le ppc64le ppc64le GNU/Linux")
-UNAME_FREEBSD = ("FreeBSD fbsd12-1 12.1-RELEASE-p10 "
- "FreeBSD 12.1-RELEASE-p10 GENERIC amd64")
+from cloudinit.sources import DataSourceSmartOS as ds_smartos
+from tests.unittests.helpers import (
+ CiTestCase,
+ cloud_init_project_dir,
+ dir2dict,
+ populate_dir,
+ populate_dir_with_ts,
+)
+
+UNAME_MYSYS = (
+ "Linux bart 4.4.0-62-generic #83-Ubuntu "
+ "SMP Wed Jan 18 14:10:15 UTC 2017 x86_64 GNU/Linux"
+)
+UNAME_PPC64EL = (
+ "Linux diamond 4.4.0-83-generic #106-Ubuntu SMP "
+ "Mon Jun 26 17:53:54 UTC 2017 "
+ "ppc64le ppc64le ppc64le GNU/Linux"
+)
+UNAME_FREEBSD = (
+ "FreeBSD fbsd12-1 12.1-RELEASE-p10 FreeBSD 12.1-RELEASE-p10 GENERIC amd64"
+)
BLKID_EFI_ROOT = """
DEVNAME=/dev/sda1
@@ -37,10 +44,16 @@ PARTUUID=30c65c77-e07d-4039-b2fb-88b1fb5fa1fc
# this is a Ubuntu 18.04 disk.img output (dual uefi and bios bootable)
BLKID_UEFI_UBUNTU = [
- {'DEVNAME': 'vda1', 'TYPE': 'ext4', 'PARTUUID': uuid4(), 'UUID': uuid4()},
- {'DEVNAME': 'vda14', 'PARTUUID': uuid4()},
- {'DEVNAME': 'vda15', 'TYPE': 'vfat', 'LABEL': 'UEFI', 'PARTUUID': uuid4(),
- 'UUID': '5F55-129B'}]
+ {"DEVNAME": "vda1", "TYPE": "ext4", "PARTUUID": uuid4(), "UUID": uuid4()},
+ {"DEVNAME": "vda14", "PARTUUID": uuid4()},
+ {
+ "DEVNAME": "vda15",
+ "TYPE": "vfat",
+ "LABEL": "UEFI",
+ "PARTUUID": uuid4(),
+ "UUID": "5F55-129B",
+ },
+]
POLICY_FOUND_ONLY = "search,found=all,maybe=none,notfound=disabled"
@@ -48,7 +61,7 @@ POLICY_FOUND_OR_MAYBE = "search,found=all,maybe=all,notfound=disabled"
DI_DEFAULT_POLICY = "search,found=all,maybe=all,notfound=disabled"
DI_DEFAULT_POLICY_NO_DMI = "search,found=all,maybe=all,notfound=enabled"
DI_EC2_STRICT_ID_DEFAULT = "true"
-OVF_MATCH_STRING = 'http://schemas.dmtf.org/ovf/environment/1'
+OVF_MATCH_STRING = "http://schemas.dmtf.org/ovf/environment/1"
SHELL_MOCK_TMPL = """\
%(name)s() {
@@ -62,7 +75,7 @@ SHELL_MOCK_TMPL = """\
RC_FOUND = 0
RC_NOT_FOUND = 1
-DS_NONE = 'None'
+DS_NONE = "None"
P_CHASSIS_ASSET_TAG = "sys/class/dmi/id/chassis_asset_tag"
P_PRODUCT_NAME = "sys/class/dmi/id/product_name"
@@ -74,31 +87,45 @@ P_DSID_CFG = "etc/cloud/ds-identify.cfg"
IBM_CONFIG_UUID = "9796-932E"
-MOCK_VIRT_IS_CONTAINER_OTHER = {'name': 'detect_virt',
- 'RET': 'container-other', 'ret': 0}
-MOCK_VIRT_IS_KVM = {'name': 'detect_virt', 'RET': 'kvm', 'ret': 0}
-MOCK_VIRT_IS_VMWARE = {'name': 'detect_virt', 'RET': 'vmware', 'ret': 0}
+MOCK_VIRT_IS_CONTAINER_OTHER = {
+ "name": "detect_virt",
+ "RET": "container-other",
+ "ret": 0,
+}
+MOCK_NOT_LXD_DATASOURCE = {"name": "dscheck_LXD", "ret": 1}
+MOCK_VIRT_IS_KVM = {"name": "detect_virt", "RET": "kvm", "ret": 0}
+MOCK_VIRT_IS_VMWARE = {"name": "detect_virt", "RET": "vmware", "ret": 0}
# currently, SmartOS' hypervisor "bhyve" is unknown to systemd-detect-virt.
-MOCK_VIRT_IS_VM_OTHER = {'name': 'detect_virt', 'RET': 'vm-other', 'ret': 0}
-MOCK_VIRT_IS_XEN = {'name': 'detect_virt', 'RET': 'xen', 'ret': 0}
-MOCK_UNAME_IS_PPC64 = {'name': 'uname', 'out': UNAME_PPC64EL, 'ret': 0}
-MOCK_UNAME_IS_FREEBSD = {'name': 'uname', 'out': UNAME_FREEBSD, 'ret': 0}
+MOCK_VIRT_IS_VM_OTHER = {"name": "detect_virt", "RET": "vm-other", "ret": 0}
+MOCK_VIRT_IS_XEN = {"name": "detect_virt", "RET": "xen", "ret": 0}
+MOCK_UNAME_IS_PPC64 = {"name": "uname", "out": UNAME_PPC64EL, "ret": 0}
+MOCK_UNAME_IS_FREEBSD = {"name": "uname", "out": UNAME_FREEBSD, "ret": 0}
+
+DEFAULT_MOCKS = [MOCK_NOT_LXD_DATASOURCE]
shell_true = 0
shell_false = 1
-CallReturn = namedtuple('CallReturn',
- ['rc', 'stdout', 'stderr', 'cfg', 'files'])
+CallReturn = namedtuple(
+ "CallReturn", ["rc", "stdout", "stderr", "cfg", "files"]
+)
class DsIdentifyBase(CiTestCase):
- dsid_path = os.path.realpath('tools/ds-identify')
- allowed_subp = ['sh']
-
- def call(self, rootd=None, mocks=None, func="main", args=None, files=None,
- policy_dmi=DI_DEFAULT_POLICY,
- policy_no_dmi=DI_DEFAULT_POLICY_NO_DMI,
- ec2_strict_id=DI_EC2_STRICT_ID_DEFAULT):
+ dsid_path = cloud_init_project_dir("tools/ds-identify")
+ allowed_subp = ["sh"]
+
+ def call(
+ self,
+ rootd=None,
+ mocks=None,
+ func="main",
+ args=None,
+ files=None,
+ policy_dmi=DI_DEFAULT_POLICY,
+ policy_no_dmi=DI_DEFAULT_POLICY_NO_DMI,
+ ec2_strict_id=DI_EC2_STRICT_ID_DEFAULT,
+ ):
if args is None:
args = []
if mocks is None:
@@ -110,7 +137,7 @@ class DsIdentifyBase(CiTestCase):
if rootd is None:
rootd = self.tmp_dir()
- unset = '_unset'
+ unset = "_unset"
wrap = self.tmp_path(path="_shwrap", dir=rootd)
populate_dir(rootd, files)
@@ -126,11 +153,11 @@ class DsIdentifyBase(CiTestCase):
'DI_DEFAULT_POLICY="%s"' % policy_dmi,
'DI_DEFAULT_POLICY_NO_DMI="%s"' % policy_no_dmi,
'DI_EC2_STRICT_ID_DEFAULT="%s"' % ec2_strict_id,
- ""
+ "",
]
def write_mock(data):
- ddata = {'out': None, 'err': None, 'ret': 0, 'RET': None}
+ ddata = {"out": None, "err": None, "ret": 0, "RET": None}
ddata.update(data)
for k in ddata:
if ddata[k] is None:
@@ -139,68 +166,88 @@ class DsIdentifyBase(CiTestCase):
mocklines = []
defaults = [
- {'name': 'detect_virt', 'RET': 'none', 'ret': 1},
- {'name': 'uname', 'out': UNAME_MYSYS},
- {'name': 'blkid', 'out': BLKID_EFI_ROOT},
- {'name': 'ovf_vmware_transport_guestinfo',
- 'out': 'No value found', 'ret': 1},
- {'name': 'dmi_decode', 'ret': 1,
- 'err': 'No dmidecode program. ERROR.'},
- {'name': 'get_kenv_field', 'ret': 1,
- 'err': 'No kenv program. ERROR.'},
+ {"name": "detect_virt", "RET": "none", "ret": 1},
+ {"name": "uname", "out": UNAME_MYSYS},
+ {"name": "blkid", "out": BLKID_EFI_ROOT},
+ {
+ "name": "ovf_vmware_transport_guestinfo",
+ "out": "No value found",
+ "ret": 1,
+ },
+ {
+ "name": "dmi_decode",
+ "ret": 1,
+ "err": "No dmidecode program. ERROR.",
+ },
+ {
+ "name": "get_kenv_field",
+ "ret": 1,
+ "err": "No kenv program. ERROR.",
+ },
]
- written = [d['name'] for d in mocks]
+ written = [d["name"] for d in mocks]
for data in mocks:
mocklines.append(write_mock(data))
for d in defaults:
- if d['name'] not in written:
+ if d["name"] not in written:
mocklines.append(write_mock(d))
- endlines = [
- func + ' ' + ' '.join(['"%s"' % s for s in args])
- ]
+ endlines = [func + " " + " ".join(['"%s"' % s for s in args])]
with open(wrap, "w") as fp:
- fp.write('\n'.join(head + mocklines + endlines) + "\n")
+ fp.write("\n".join(head + mocklines + endlines) + "\n")
rc = 0
try:
- out, err = subp.subp(['sh', '-c', '. %s' % wrap], capture=True)
+ out, err = subp.subp(["sh", "-c", ". %s" % wrap], capture=True)
except subp.ProcessExecutionError as e:
rc = e.exit_code
out = e.stdout
err = e.stderr
cfg = None
- cfg_out = os.path.join(rootd, 'run/cloud-init/cloud.cfg')
+ cfg_out = os.path.join(rootd, "run/cloud-init/cloud.cfg")
if os.path.exists(cfg_out):
contents = util.load_file(cfg_out)
try:
cfg = safeyaml.load(contents)
except Exception as e:
- cfg = {"_INVALID_YAML": contents,
- "_EXCEPTION": str(e)}
+ cfg = {"_INVALID_YAML": contents, "_EXCEPTION": str(e)}
return CallReturn(rc, out, err, cfg, dir2dict(rootd))
def _call_via_dict(self, data, rootd=None, **kwargs):
# return output of self.call with a dict input like VALID_CFG[item]
- xwargs = {'rootd': rootd}
- passthrough = ('mocks', 'func', 'args', 'policy_dmi',
- 'policy_no_dmi', 'files')
+ xwargs = {"rootd": rootd}
+ passthrough = (
+ "mocks",
+ "func",
+ "args",
+ "policy_dmi",
+ "policy_no_dmi",
+ "files",
+ )
for k in passthrough:
if k in data:
xwargs[k] = data[k]
if k in kwargs:
xwargs[k] = kwargs[k]
+ if "mocks" not in xwargs:
+ xwargs["mocks"] = DEFAULT_MOCKS
+ else:
+ mocked_funcs = [m["name"] for m in xwargs["mocks"]]
+ for default_mock in DEFAULT_MOCKS:
+ if default_mock["name"] not in mocked_funcs:
+ xwargs["mocks"].append(default_mock)
return self.call(**xwargs)
def _test_ds_found(self, name):
data = copy.deepcopy(VALID_CFG[name])
return self._check_via_dict(
- data, RC_FOUND, dslist=[data.get('ds'), DS_NONE])
+ data, RC_FOUND, dslist=[data.get("ds"), DS_NONE]
+ )
def _test_ds_not_found(self, name):
data = copy.deepcopy(VALID_CFG[name])
@@ -212,87 +259,104 @@ class DsIdentifyBase(CiTestCase):
try:
self.assertEqual(rc, ret.rc)
if dslist is not None:
- self.assertEqual(dslist, ret.cfg['datasource_list'])
+ self.assertEqual(dslist, ret.cfg["datasource_list"])
good = True
finally:
if not good:
- _print_run_output(ret.rc, ret.stdout, ret.stderr, ret.cfg,
- ret.files)
+ _print_run_output(
+ ret.rc, ret.stdout, ret.stderr, ret.cfg, ret.files
+ )
return ret
class TestDsIdentify(DsIdentifyBase):
def test_wb_print_variables(self):
"""_print_info reports an array of discovered variables to stderr."""
- data = VALID_CFG['Azure-dmi-detection']
+ data = VALID_CFG["Azure-dmi-detection"]
_, _, err, _, _ = self._call_via_dict(data)
expected_vars = [
- 'DMI_PRODUCT_NAME', 'DMI_SYS_VENDOR', 'DMI_PRODUCT_SERIAL',
- 'DMI_PRODUCT_UUID', 'PID_1_PRODUCT_NAME', 'DMI_CHASSIS_ASSET_TAG',
- 'FS_LABELS', 'KERNEL_CMDLINE', 'VIRT', 'UNAME_KERNEL_NAME',
- 'UNAME_KERNEL_RELEASE', 'UNAME_KERNEL_VERSION', 'UNAME_MACHINE',
- 'UNAME_NODENAME', 'UNAME_OPERATING_SYSTEM', 'DSNAME', 'DSLIST',
- 'MODE', 'ON_FOUND', 'ON_MAYBE', 'ON_NOTFOUND']
+ "DMI_PRODUCT_NAME",
+ "DMI_SYS_VENDOR",
+ "DMI_PRODUCT_SERIAL",
+ "DMI_PRODUCT_UUID",
+ "PID_1_PRODUCT_NAME",
+ "DMI_CHASSIS_ASSET_TAG",
+ "FS_LABELS",
+ "KERNEL_CMDLINE",
+ "VIRT",
+ "UNAME_KERNEL_NAME",
+ "UNAME_KERNEL_RELEASE",
+ "UNAME_KERNEL_VERSION",
+ "UNAME_MACHINE",
+ "UNAME_NODENAME",
+ "UNAME_OPERATING_SYSTEM",
+ "DSNAME",
+ "DSLIST",
+ "MODE",
+ "ON_FOUND",
+ "ON_MAYBE",
+ "ON_NOTFOUND",
+ ]
for var in expected_vars:
- self.assertIn('{0}='.format(var), err)
+ self.assertIn("{0}=".format(var), err)
def test_azure_dmi_detection_from_chassis_asset_tag(self):
"""Azure datasource is detected from DMI chassis-asset-tag"""
- self._test_ds_found('Azure-dmi-detection')
+ self._test_ds_found("Azure-dmi-detection")
def test_azure_seed_file_detection(self):
"""Azure datasource is detected due to presence of a seed file.
The seed file tested is /var/lib/cloud/seed/azure/ovf-env.xml."""
- self._test_ds_found('Azure-seed-detection')
+ self._test_ds_found("Azure-seed-detection")
def test_aws_ec2_hvm(self):
"""EC2: hvm instances use dmi serial and uuid starting with 'ec2'."""
- self._test_ds_found('Ec2-hvm')
+ self._test_ds_found("Ec2-hvm")
def test_aws_ec2_xen(self):
"""EC2: sys/hypervisor/uuid starts with ec2."""
- self._test_ds_found('Ec2-xen')
+ self._test_ds_found("Ec2-xen")
def test_brightbox_is_ec2(self):
"""EC2: product_serial ends with '.brightbox.com'"""
- self._test_ds_found('Ec2-brightbox')
+ self._test_ds_found("Ec2-brightbox")
def test_bobrightbox_is_not_brightbox(self):
"""EC2: bobrightbox.com in product_serial is not brightbox'"""
- self._test_ds_not_found('Ec2-brightbox-negative')
+ self._test_ds_not_found("Ec2-brightbox-negative")
def test_freebsd_nocloud(self):
"""NoCloud identified on FreeBSD via label by geom."""
- self._test_ds_found('NoCloud-fbsd')
+ self._test_ds_found("NoCloud-fbsd")
def test_gce_by_product_name(self):
"""GCE identifies itself with product_name."""
- self._test_ds_found('GCE')
+ self._test_ds_found("GCE")
def test_gce_by_serial(self):
"""Older gce compute instances must be identified by serial."""
- self._test_ds_found('GCE-serial')
+ self._test_ds_found("GCE-serial")
def test_config_drive(self):
"""ConfigDrive datasource has a disk with LABEL=config-2."""
- self._test_ds_found('ConfigDrive')
+ self._test_ds_found("ConfigDrive")
def test_rbx_cloud(self):
"""Rbx datasource has a disk with LABEL=CLOUDMD."""
- self._test_ds_found('RbxCloud')
+ self._test_ds_found("RbxCloud")
def test_rbx_cloud_lower(self):
"""Rbx datasource has a disk with LABEL=cloudmd."""
- self._test_ds_found('RbxCloudLower')
+ self._test_ds_found("RbxCloudLower")
def test_config_drive_upper(self):
"""ConfigDrive datasource has a disk with LABEL=CONFIG-2."""
- self._test_ds_found('ConfigDriveUpper')
+ self._test_ds_found("ConfigDriveUpper")
def test_config_drive_seed(self):
"""Config Drive seed directory."""
- self._test_ds_found('ConfigDrive-seed')
+ self._test_ds_found("ConfigDrive-seed")
def test_config_drive_interacts_with_ibmcloud_config_disk(self):
"""Verify ConfigDrive interaction with IBMCloud.
@@ -300,34 +364,35 @@ class TestDsIdentify(DsIdentifyBase):
If ConfigDrive is enabled and not IBMCloud, then ConfigDrive
should claim the ibmcloud 'config-2' disk.
If IBMCloud is enabled, then ConfigDrive should skip."""
- data = copy.deepcopy(VALID_CFG['IBMCloud-config-2'])
- files = data.get('files', {})
+ data = copy.deepcopy(VALID_CFG["IBMCloud-config-2"])
+ files = data.get("files", {})
if not files:
- data['files'] = files
- cfgpath = 'etc/cloud/cloud.cfg.d/99_networklayer_common.cfg'
+ data["files"] = files
+ cfgpath = "etc/cloud/cloud.cfg.d/99_networklayer_common.cfg"
# with a list including IBMCloud, config drive should not be found.
- files[cfgpath] = 'datasource_list: [ ConfigDrive, IBMCloud ]\n'
+ files[cfgpath] = "datasource_list: [ ConfigDrive, IBMCloud ]\n"
ret = self._check_via_dict(data, shell_true)
- self.assertEqual(
- ret.cfg.get('datasource_list'), ['IBMCloud', 'None'])
+ self.assertEqual(ret.cfg.get("datasource_list"), ["IBMCloud", "None"])
# But if IBMCloud is not enabled, config drive should claim this.
- files[cfgpath] = 'datasource_list: [ ConfigDrive, NoCloud ]\n'
+ files[cfgpath] = "datasource_list: [ ConfigDrive, NoCloud ]\n"
ret = self._check_via_dict(data, shell_true)
self.assertEqual(
- ret.cfg.get('datasource_list'), ['ConfigDrive', 'None'])
+ ret.cfg.get("datasource_list"), ["ConfigDrive", "None"]
+ )
def test_ibmcloud_template_userdata_in_provisioning(self):
"""Template provisioned with user-data during provisioning stage.
Template provisioning with user-data has METADATA disk,
datasource should return not found."""
- data = copy.deepcopy(VALID_CFG['IBMCloud-metadata'])
+ data = copy.deepcopy(VALID_CFG["IBMCloud-metadata"])
# change the 'is_ibm_provisioning' mock to return 0 (true)
- isprov_m = [m for m in data['mocks']
- if m["name"] == "is_ibm_provisioning"][0]
- isprov_m['ret'] = shell_true
+ isprov_m = [
+ m for m in data["mocks"] if m["name"] == "is_ibm_provisioning"
+ ][0]
+ isprov_m["ret"] = shell_true
return self._check_via_dict(data, RC_NOT_FOUND)
def test_ibmcloud_template_userdata(self):
@@ -335,58 +400,61 @@ class TestDsIdentify(DsIdentifyBase):
Template provisioning with user-data has METADATA disk.
Datasource should return found."""
- self._test_ds_found('IBMCloud-metadata')
+ self._test_ds_found("IBMCloud-metadata")
def test_ibmcloud_template_no_userdata_in_provisioning(self):
"""Template provisioned with no user-data during provisioning.
no disks attached. Datasource should return not found."""
- data = copy.deepcopy(VALID_CFG['IBMCloud-nodisks'])
- data['mocks'].append(
- {'name': 'is_ibm_provisioning', 'ret': shell_true})
+ data = copy.deepcopy(VALID_CFG["IBMCloud-nodisks"])
+ data["mocks"].append(
+ {"name": "is_ibm_provisioning", "ret": shell_true}
+ )
return self._check_via_dict(data, RC_NOT_FOUND)
def test_ibmcloud_template_no_userdata(self):
"""Template provisioned with no user-data first boot.
no disks attached. Datasource should return not found."""
- self._check_via_dict(VALID_CFG['IBMCloud-nodisks'], RC_NOT_FOUND)
+ self._check_via_dict(VALID_CFG["IBMCloud-nodisks"], RC_NOT_FOUND)
def test_ibmcloud_os_code(self):
"""Launched by os code always has config-2 disk."""
- self._test_ds_found('IBMCloud-config-2')
+ self._test_ds_found("IBMCloud-config-2")
def test_ibmcloud_os_code_different_uuid(self):
"""IBM cloud config-2 disks must be explicit match on UUID.
If the UUID is not 9796-932E then we actually expect ConfigDrive."""
- data = copy.deepcopy(VALID_CFG['IBMCloud-config-2'])
+ data = copy.deepcopy(VALID_CFG["IBMCloud-config-2"])
offset = None
- for m, d in enumerate(data['mocks']):
- if d.get('name') == "blkid":
+ for m, d in enumerate(data["mocks"]):
+ if d.get("name") == "blkid":
offset = m
break
if not offset:
raise ValueError("Expected to find 'blkid' mock, but did not.")
- data['mocks'][offset]['out'] = d['out'].replace(ds_ibm.IBM_CONFIG_UUID,
- "DEAD-BEEF")
+ data["mocks"][offset]["out"] = d["out"].replace(
+ ds_ibm.IBM_CONFIG_UUID, "DEAD-BEEF"
+ )
self._check_via_dict(
- data, rc=RC_FOUND, dslist=['ConfigDrive', DS_NONE])
+ data, rc=RC_FOUND, dslist=["ConfigDrive", DS_NONE]
+ )
def test_ibmcloud_with_nocloud_seed(self):
"""NoCloud seed should be preferred over IBMCloud.
A nocloud seed should be preferred over IBMCloud even if enabled.
Ubuntu 16.04 images have <vlc>/seed/nocloud-net. LP: #1766401."""
- data = copy.deepcopy(VALID_CFG['IBMCloud-config-2'])
- files = data.get('files', {})
+ data = copy.deepcopy(VALID_CFG["IBMCloud-config-2"])
+ files = data.get("files", {})
if not files:
- data['files'] = files
- files.update(VALID_CFG['NoCloud-seed']['files'])
+ data["files"] = files
+ files.update(VALID_CFG["NoCloud-seed"]["files"])
ret = self._check_via_dict(data, shell_true)
self.assertEqual(
- ['NoCloud', 'IBMCloud', 'None'],
- ret.cfg.get('datasource_list'))
+ ["NoCloud", "IBMCloud", "None"], ret.cfg.get("datasource_list")
+ )
def test_ibmcloud_with_configdrive_seed(self):
"""ConfigDrive seed should be preferred over IBMCloud.
@@ -394,28 +462,28 @@ class TestDsIdentify(DsIdentifyBase):
A ConfigDrive seed should be preferred over IBMCloud even if enabled.
Ubuntu 16.04 images have a fstab entry that mounts the
METADATA disk into <vlc>/seed/config_drive. LP: #1766401."""
- data = copy.deepcopy(VALID_CFG['IBMCloud-config-2'])
- files = data.get('files', {})
+ data = copy.deepcopy(VALID_CFG["IBMCloud-config-2"])
+ files = data.get("files", {})
if not files:
- data['files'] = files
- files.update(VALID_CFG['ConfigDrive-seed']['files'])
+ data["files"] = files
+ files.update(VALID_CFG["ConfigDrive-seed"]["files"])
ret = self._check_via_dict(data, shell_true)
self.assertEqual(
- ['ConfigDrive', 'IBMCloud', 'None'],
- ret.cfg.get('datasource_list'))
+ ["ConfigDrive", "IBMCloud", "None"], ret.cfg.get("datasource_list")
+ )
def test_policy_disabled(self):
"""A Builtin policy of 'disabled' should return not found.
Even though a search would find something, the builtin policy of
disabled should cause the return of not found."""
- mydata = copy.deepcopy(VALID_CFG['Ec2-hvm'])
+ mydata = copy.deepcopy(VALID_CFG["Ec2-hvm"])
self._check_via_dict(mydata, rc=RC_NOT_FOUND, policy_dmi="disabled")
def test_policy_config_disable_overrides_builtin(self):
"""explicit policy: disabled in config file should cause not found."""
- mydata = copy.deepcopy(VALID_CFG['Ec2-hvm'])
- mydata['files'][P_DSID_CFG] = '\n'.join(['policy: disabled', ''])
+ mydata = copy.deepcopy(VALID_CFG["Ec2-hvm"])
+ mydata["files"][P_DSID_CFG] = "\n".join(["policy: disabled", ""])
self._check_via_dict(mydata, rc=RC_NOT_FOUND)
def test_single_entry_defines_datasource(self):
@@ -424,54 +492,55 @@ class TestDsIdentify(DsIdentifyBase):
Test the valid Ec2-hvm, but provide a config file that specifies
a single entry in datasource_list. The configured value should
be used."""
- mydata = copy.deepcopy(VALID_CFG['Ec2-hvm'])
- cfgpath = 'etc/cloud/cloud.cfg.d/myds.cfg'
- mydata['files'][cfgpath] = 'datasource_list: ["NoCloud"]\n'
- self._check_via_dict(mydata, rc=RC_FOUND, dslist=['NoCloud', DS_NONE])
+ mydata = copy.deepcopy(VALID_CFG["Ec2-hvm"])
+ cfgpath = "etc/cloud/cloud.cfg.d/myds.cfg"
+ mydata["files"][cfgpath] = 'datasource_list: ["NoCloud"]\n'
+ self._check_via_dict(mydata, rc=RC_FOUND, dslist=["NoCloud", DS_NONE])
def test_configured_list_with_none(self):
"""When datasource_list already contains None, None is not added.
The explicitly configured datasource_list has 'None' in it. That
should not have None automatically added."""
- mydata = copy.deepcopy(VALID_CFG['GCE'])
- cfgpath = 'etc/cloud/cloud.cfg.d/myds.cfg'
- mydata['files'][cfgpath] = 'datasource_list: ["Ec2", "None"]\n'
- self._check_via_dict(mydata, rc=RC_FOUND, dslist=['Ec2', DS_NONE])
+ mydata = copy.deepcopy(VALID_CFG["GCE"])
+ cfgpath = "etc/cloud/cloud.cfg.d/myds.cfg"
+ mydata["files"][cfgpath] = 'datasource_list: ["Ec2", "None"]\n'
+ self._check_via_dict(mydata, rc=RC_FOUND, dslist=["Ec2", DS_NONE])
def test_aliyun_identified(self):
"""Test that Aliyun cloud is identified by product id."""
- self._test_ds_found('AliYun')
+ self._test_ds_found("AliYun")
def test_aliyun_over_ec2(self):
"""Even if all other factors identified Ec2, AliYun should be used."""
- mydata = copy.deepcopy(VALID_CFG['Ec2-xen'])
- self._test_ds_found('AliYun')
- prod_name = VALID_CFG['AliYun']['files'][P_PRODUCT_NAME]
- mydata['files'][P_PRODUCT_NAME] = prod_name
+ mydata = copy.deepcopy(VALID_CFG["Ec2-xen"])
+ self._test_ds_found("AliYun")
+ prod_name = VALID_CFG["AliYun"]["files"][P_PRODUCT_NAME]
+ mydata["files"][P_PRODUCT_NAME] = prod_name
policy = "search,found=first,maybe=none,notfound=disabled"
- self._check_via_dict(mydata, rc=RC_FOUND, dslist=['AliYun', DS_NONE],
- policy_dmi=policy)
+ self._check_via_dict(
+ mydata, rc=RC_FOUND, dslist=["AliYun", DS_NONE], policy_dmi=policy
+ )
def test_default_openstack_intel_is_found(self):
"""On Intel, openstack must be identified."""
- self._test_ds_found('OpenStack')
+ self._test_ds_found("OpenStack")
def test_openstack_open_telekom_cloud(self):
"""Open Telecom identification."""
- self._test_ds_found('OpenStack-OpenTelekom')
+ self._test_ds_found("OpenStack-OpenTelekom")
def test_openstack_sap_ccloud(self):
"""SAP Converged Cloud identification"""
- self._test_ds_found('OpenStack-SAPCCloud')
+ self._test_ds_found("OpenStack-SAPCCloud")
def test_openstack_asset_tag_nova(self):
"""OpenStack identification via asset tag OpenStack Nova."""
- self._test_ds_found('OpenStack-AssetTag-Nova')
+ self._test_ds_found("OpenStack-AssetTag-Nova")
def test_openstack_asset_tag_copute(self):
"""OpenStack identification via asset tag OpenStack Compute."""
- self._test_ds_found('OpenStack-AssetTag-Compute')
+ self._test_ds_found("OpenStack-AssetTag-Compute")
def test_openstack_on_non_intel_is_maybe(self):
"""On non-Intel, openstack without dmi info is maybe.
@@ -479,175 +548,282 @@ class TestDsIdentify(DsIdentifyBase):
nova does not identify itself on platforms other than intel.
https://bugs.launchpad.net/cloud-init/+bugs?field.tag=dsid-nova"""
- data = copy.deepcopy(VALID_CFG['OpenStack'])
- del data['files'][P_PRODUCT_NAME]
- data.update({'policy_dmi': POLICY_FOUND_OR_MAYBE,
- 'policy_no_dmi': POLICY_FOUND_OR_MAYBE})
+ data = copy.deepcopy(VALID_CFG["OpenStack"])
+ del data["files"][P_PRODUCT_NAME]
+ data.update(
+ {
+ "policy_dmi": POLICY_FOUND_OR_MAYBE,
+ "policy_no_dmi": POLICY_FOUND_OR_MAYBE,
+ }
+ )
# this should show not found as default uname in tests is intel.
# and intel openstack requires positive identification.
self._check_via_dict(data, RC_NOT_FOUND, dslist=None)
# updating the uname to ppc64 though should get a maybe.
- data.update({'mocks': [MOCK_VIRT_IS_KVM, MOCK_UNAME_IS_PPC64]})
+ data.update({"mocks": [MOCK_VIRT_IS_KVM, MOCK_UNAME_IS_PPC64]})
(_, _, err, _, _) = self._check_via_dict(
- data, RC_FOUND, dslist=['OpenStack', 'None'])
+ data, RC_FOUND, dslist=["OpenStack", "None"]
+ )
self.assertIn("check for 'OpenStack' returned maybe", err)
def test_default_ovf_is_found(self):
"""OVF is identified found when ovf/ovf-env.xml seed file exists."""
- self._test_ds_found('OVF-seed')
+ self._test_ds_found("OVF-seed")
def test_default_ovf_with_detect_virt_none_not_found(self):
"""OVF identifies not found when detect_virt returns "none"."""
self._check_via_dict(
- {'ds': 'OVF'}, rc=RC_NOT_FOUND, policy_dmi="disabled")
+ {"ds": "OVF"}, rc=RC_NOT_FOUND, policy_dmi="disabled"
+ )
def test_default_ovf_returns_not_found_on_azure(self):
"""OVF datasource won't be found as false positive on Azure."""
- ovfonazure = copy.deepcopy(VALID_CFG['OVF'])
+ ovfonazure = copy.deepcopy(VALID_CFG["OVF"])
# Set azure asset tag to assert OVF content not found
- ovfonazure['files'][P_CHASSIS_ASSET_TAG] = (
- '7783-7084-3265-9085-8269-3286-77\n')
- self._check_via_dict(
- ovfonazure, RC_FOUND, dslist=['Azure', DS_NONE])
+ ovfonazure["files"][
+ P_CHASSIS_ASSET_TAG
+ ] = "7783-7084-3265-9085-8269-3286-77\n"
+ self._check_via_dict(ovfonazure, RC_FOUND, dslist=["Azure", DS_NONE])
def test_ovf_on_vmware_iso_found_by_cdrom_with_ovf_schema_match(self):
"""OVF is identified when iso9660 cdrom path contains ovf schema."""
- self._test_ds_found('OVF')
+ self._test_ds_found("OVF")
def test_ovf_on_vmware_guestinfo_found(self):
"""OVF guest info is found on vmware."""
- self._test_ds_found('OVF-guestinfo')
+ self._test_ds_found("OVF-guestinfo")
def test_ovf_on_vmware_iso_found_when_vmware_customization(self):
"""OVF is identified when vmware customization is enabled."""
- self._test_ds_found('OVF-vmware-customization')
+ self._test_ds_found("OVF-vmware-customization")
def test_ovf_on_vmware_iso_found_open_vm_tools_64(self):
"""OVF is identified when open-vm-tools installed in /usr/lib64."""
- cust64 = copy.deepcopy(VALID_CFG['OVF-vmware-customization'])
- p32 = 'usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so'
- open64 = 'usr/lib64/open-vm-tools/plugins/vmsvc/libdeployPkgPlugin.so'
- cust64['files'][open64] = cust64['files'][p32]
- del cust64['files'][p32]
+ cust64 = copy.deepcopy(VALID_CFG["OVF-vmware-customization"])
+ p32 = "usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so"
+ open64 = "usr/lib64/open-vm-tools/plugins/vmsvc/libdeployPkgPlugin.so"
+ cust64["files"][open64] = cust64["files"][p32]
+ del cust64["files"][p32]
+ return self._check_via_dict(
+ cust64, RC_FOUND, dslist=[cust64.get("ds"), DS_NONE]
+ )
+
+ def test_ovf_on_vmware_iso_found_open_vm_tools_x86_64_linux_gnu(self):
+ """OVF is identified when open-vm-tools installed in
+ /usr/lib/x86_64-linux-gnu."""
+ cust64 = copy.deepcopy(VALID_CFG["OVF-vmware-customization"])
+ p32 = "usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so"
+ x86 = (
+ "usr/lib/x86_64-linux-gnu/open-vm-tools/plugins/vmsvc/"
+ "libdeployPkgPlugin.so"
+ )
+ cust64["files"][x86] = cust64["files"][p32]
+ del cust64["files"][p32]
+ return self._check_via_dict(
+ cust64, RC_FOUND, dslist=[cust64.get("ds"), DS_NONE]
+ )
+
+ def test_ovf_on_vmware_iso_found_open_vm_tools_aarch64_linux_gnu(self):
+ """OVF is identified when open-vm-tools installed in
+ /usr/lib/aarch64-linux-gnu."""
+ cust64 = copy.deepcopy(VALID_CFG["OVF-vmware-customization"])
+ p32 = "usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so"
+ aarch64 = (
+ "usr/lib/aarch64-linux-gnu/open-vm-tools/plugins/vmsvc/"
+ "libdeployPkgPlugin.so"
+ )
+ cust64["files"][aarch64] = cust64["files"][p32]
+ del cust64["files"][p32]
return self._check_via_dict(
- cust64, RC_FOUND, dslist=[cust64.get('ds'), DS_NONE])
+ cust64, RC_FOUND, dslist=[cust64.get("ds"), DS_NONE]
+ )
def test_ovf_on_vmware_iso_found_by_cdrom_with_matching_fs_label(self):
"""OVF is identified by well-known iso9660 labels."""
- ovf_cdrom_by_label = copy.deepcopy(VALID_CFG['OVF'])
+ ovf_cdrom_by_label = copy.deepcopy(VALID_CFG["OVF"])
# Unset matching cdrom ovf schema content
- ovf_cdrom_by_label['files']['dev/sr0'] = 'No content match'
+ ovf_cdrom_by_label["files"]["dev/sr0"] = "No content match"
self._check_via_dict(
- ovf_cdrom_by_label, rc=RC_NOT_FOUND, policy_dmi="disabled")
+ ovf_cdrom_by_label, rc=RC_NOT_FOUND, policy_dmi="disabled"
+ )
# Add recognized labels
- valid_ovf_labels = ['ovf-transport', 'OVF-TRANSPORT',
- "OVFENV", "ovfenv", "OVF ENV", "ovf env"]
+ valid_ovf_labels = [
+ "ovf-transport",
+ "OVF-TRANSPORT",
+ "OVFENV",
+ "ovfenv",
+ "OVF ENV",
+ "ovf env",
+ ]
for valid_ovf_label in valid_ovf_labels:
- ovf_cdrom_by_label['mocks'][0]['out'] = blkid_out([
- {'DEVNAME': 'sda1', 'TYPE': 'ext4', 'LABEL': 'rootfs'},
- {'DEVNAME': 'sr0', 'TYPE': 'iso9660',
- 'LABEL': valid_ovf_label},
- {'DEVNAME': 'vda1', 'TYPE': 'ntfs', 'LABEL': 'data'}])
+ ovf_cdrom_by_label["mocks"][0]["out"] = blkid_out(
+ [
+ {"DEVNAME": "sda1", "TYPE": "ext4", "LABEL": "rootfs"},
+ {
+ "DEVNAME": "sr0",
+ "TYPE": "iso9660",
+ "LABEL": valid_ovf_label,
+ },
+ {"DEVNAME": "vda1", "TYPE": "ntfs", "LABEL": "data"},
+ ]
+ )
self._check_via_dict(
- ovf_cdrom_by_label, rc=RC_FOUND, dslist=['OVF', DS_NONE])
+ ovf_cdrom_by_label, rc=RC_FOUND, dslist=["OVF", DS_NONE]
+ )
def test_ovf_on_vmware_iso_found_by_cdrom_with_different_size(self):
"""OVF is identified by well-known iso9660 labels."""
- ovf_cdrom_with_size = copy.deepcopy(VALID_CFG['OVF'])
+ ovf_cdrom_with_size = copy.deepcopy(VALID_CFG["OVF"])
# Set cdrom size to 20480 (10MB in 512 byte units)
- ovf_cdrom_with_size['files']['sys/class/block/sr0/size'] = '20480\n'
+ ovf_cdrom_with_size["files"]["sys/class/block/sr0/size"] = "20480\n"
self._check_via_dict(
- ovf_cdrom_with_size, rc=RC_NOT_FOUND, policy_dmi="disabled")
+ ovf_cdrom_with_size, rc=RC_NOT_FOUND, policy_dmi="disabled"
+ )
# Set cdrom size to 204800 (100MB in 512 byte units)
- ovf_cdrom_with_size['files']['sys/class/block/sr0/size'] = '204800\n'
+ ovf_cdrom_with_size["files"]["sys/class/block/sr0/size"] = "204800\n"
self._check_via_dict(
- ovf_cdrom_with_size, rc=RC_NOT_FOUND, policy_dmi="disabled")
+ ovf_cdrom_with_size, rc=RC_NOT_FOUND, policy_dmi="disabled"
+ )
# Set cdrom size to 18432 (9MB in 512 byte units)
- ovf_cdrom_with_size['files']['sys/class/block/sr0/size'] = '18432\n'
+ ovf_cdrom_with_size["files"]["sys/class/block/sr0/size"] = "18432\n"
self._check_via_dict(
- ovf_cdrom_with_size, rc=RC_FOUND, dslist=['OVF', DS_NONE])
+ ovf_cdrom_with_size, rc=RC_FOUND, dslist=["OVF", DS_NONE]
+ )
# Set cdrom size to 2048 (1MB in 512 byte units)
- ovf_cdrom_with_size['files']['sys/class/block/sr0/size'] = '2048\n'
+ ovf_cdrom_with_size["files"]["sys/class/block/sr0/size"] = "2048\n"
self._check_via_dict(
- ovf_cdrom_with_size, rc=RC_FOUND, dslist=['OVF', DS_NONE])
+ ovf_cdrom_with_size, rc=RC_FOUND, dslist=["OVF", DS_NONE]
+ )
def test_default_nocloud_as_vdb_iso9660(self):
"""NoCloud is found with iso9660 filesystem on non-cdrom disk."""
- self._test_ds_found('NoCloud')
+ self._test_ds_found("NoCloud")
def test_nocloud_upper(self):
"""NoCloud is found with uppercase filesystem label."""
- self._test_ds_found('NoCloudUpper')
+ self._test_ds_found("NoCloudUpper")
def test_nocloud_fatboot(self):
"""NoCloud fatboot label - LP: #184166."""
- self._test_ds_found('NoCloud-fatboot')
+ self._test_ds_found("NoCloud-fatboot")
def test_nocloud_seed(self):
"""Nocloud seed directory."""
- self._test_ds_found('NoCloud-seed')
+ self._test_ds_found("NoCloud-seed")
def test_nocloud_seed_ubuntu_core_writable(self):
"""Nocloud seed directory ubuntu core writable"""
- self._test_ds_found('NoCloud-seed-ubuntu-core')
+ self._test_ds_found("NoCloud-seed-ubuntu-core")
def test_hetzner_found(self):
"""Hetzner cloud is identified in sys_vendor."""
- self._test_ds_found('Hetzner')
+ self._test_ds_found("Hetzner")
def test_smartos_bhyve(self):
"""SmartOS cloud identified by SmartDC in dmi."""
- self._test_ds_found('SmartOS-bhyve')
+ self._test_ds_found("SmartOS-bhyve")
def test_smartos_lxbrand(self):
"""SmartOS cloud identified on lxbrand container."""
- self._test_ds_found('SmartOS-lxbrand')
+ self._test_ds_found("SmartOS-lxbrand")
def test_smartos_lxbrand_requires_socket(self):
"""SmartOS cloud should not be identified if no socket file."""
- mycfg = copy.deepcopy(VALID_CFG['SmartOS-lxbrand'])
- del mycfg['files'][ds_smartos.METADATA_SOCKFILE]
+ mycfg = copy.deepcopy(VALID_CFG["SmartOS-lxbrand"])
+ del mycfg["files"][ds_smartos.METADATA_SOCKFILE]
self._check_via_dict(mycfg, rc=RC_NOT_FOUND, policy_dmi="disabled")
def test_path_env_gets_set_from_main(self):
"""PATH environment should always have some tokens when main is run.
We explicitly call main as we want to ensure it updates PATH."""
- cust = copy.deepcopy(VALID_CFG['NoCloud'])
+ cust = copy.deepcopy(VALID_CFG["NoCloud"])
rootd = self.tmp_dir()
- mpp = 'main-printpath'
+ mpp = "main-printpath"
pre = "MYPATH="
- cust['files'][mpp] = (
- 'PATH="/mycust/path"; main; r=$?; echo ' + pre + '$PATH; exit $r;')
+ cust["files"][mpp] = (
+ 'PATH="/mycust/path"; main; r=$?; echo ' + pre + "$PATH; exit $r;"
+ )
ret = self._check_via_dict(
- cust, RC_FOUND,
- func=".", args=[os.path.join(rootd, mpp)], rootd=rootd)
+ cust,
+ RC_FOUND,
+ func=".",
+ args=[os.path.join(rootd, mpp)],
+ rootd=rootd,
+ )
match = [
line for line in ret.stdout.splitlines() if line.startswith(pre)
][0]
toks = match.replace(pre, "").split(":")
expected = ["/sbin", "/bin", "/usr/sbin", "/usr/bin", "/mycust/path"]
- self.assertEqual(expected, [p for p in expected if p in toks],
- "path did not have expected tokens")
+ self.assertEqual(
+ expected,
+ [p for p in expected if p in toks],
+ "path did not have expected tokens",
+ )
def test_zstack_is_ec2(self):
"""EC2: chassis asset tag ends with 'zstack.io'"""
- self._test_ds_found('Ec2-ZStack')
+ self._test_ds_found("Ec2-ZStack")
def test_e24cloud_is_ec2(self):
"""EC2: e24cloud identified by sys_vendor"""
- self._test_ds_found('Ec2-E24Cloud')
+ self._test_ds_found("Ec2-E24Cloud")
def test_e24cloud_not_active(self):
"""EC2: bobrightbox.com in product_serial is not brightbox'"""
- self._test_ds_not_found('Ec2-E24Cloud-negative')
+ self._test_ds_not_found("Ec2-E24Cloud-negative")
+
+ def test_vmware_no_valid_transports(self):
+ """VMware: no valid transports"""
+ self._test_ds_not_found("VMware-NoValidTransports")
+
+ def test_vmware_envvar_no_data(self):
+ """VMware: envvar transport no data"""
+ self._test_ds_not_found("VMware-EnvVar-NoData")
+
+ def test_vmware_envvar_no_virt_id(self):
+ """VMware: envvar transport success if no virt id"""
+ self._test_ds_found("VMware-EnvVar-NoVirtID")
+
+ def test_vmware_envvar_activated_by_metadata(self):
+ """VMware: envvar transport activated by metadata"""
+ self._test_ds_found("VMware-EnvVar-Metadata")
+
+ def test_vmware_envvar_activated_by_userdata(self):
+ """VMware: envvar transport activated by userdata"""
+ self._test_ds_found("VMware-EnvVar-Userdata")
+
+ def test_vmware_envvar_activated_by_vendordata(self):
+ """VMware: envvar transport activated by vendordata"""
+ self._test_ds_found("VMware-EnvVar-Vendordata")
+
+ def test_vmware_guestinfo_no_data(self):
+ """VMware: guestinfo transport no data"""
+ self._test_ds_not_found("VMware-GuestInfo-NoData")
+
+ def test_vmware_guestinfo_no_virt_id(self):
+ """VMware: guestinfo transport fails if no virt id"""
+ self._test_ds_not_found("VMware-GuestInfo-NoVirtID")
+
+ def test_vmware_guestinfo_activated_by_metadata(self):
+ """VMware: guestinfo transport activated by metadata"""
+ self._test_ds_found("VMware-GuestInfo-Metadata")
+
+ def test_vmware_guestinfo_activated_by_userdata(self):
+ """VMware: guestinfo transport activated by userdata"""
+ self._test_ds_found("VMware-GuestInfo-Userdata")
+
+ def test_vmware_guestinfo_activated_by_vendordata(self):
+ """VMware: guestinfo transport activated by vendordata"""
+ self._test_ds_found("VMware-GuestInfo-Vendordata")
class TestBSDNoSys(DsIdentifyBase):
@@ -663,14 +839,14 @@ class TestBSDNoSys(DsIdentifyBase):
This will be used on FreeBSD systems.
"""
- self._test_ds_found('Hetzner-kenv')
+ self._test_ds_found("Hetzner-kenv")
def test_dmi_dmidecode(self):
"""Test that dmidecode(8) works on systems which don't have /sys
This will be used on all other BSD systems.
"""
- self._test_ds_found('Hetzner-dmidecode')
+ self._test_ds_found("Hetzner-dmidecode")
class TestIsIBMProvisioning(DsIdentifyBase):
@@ -694,9 +870,11 @@ class TestIsIBMProvisioning(DsIdentifyBase):
def test_config_with_old_log(self):
"""A config with a log from previous boot is not provisioning."""
rootd = self.tmp_dir()
- data = {self.prov_cfg: ("key=value\nkey2=val2\n", -10),
- self.inst_log: ("log data\n", -30),
- self.boot_ref: ("PWD=/", 0)}
+ data = {
+ self.prov_cfg: ("key=value\nkey2=val2\n", -10),
+ self.inst_log: ("log data\n", -30),
+ self.boot_ref: ("PWD=/", 0),
+ }
populate_dir_with_ts(rootd, data)
ret = self.call(rootd=rootd, func=self.funcname)
self.assertEqual(shell_false, ret.rc)
@@ -705,9 +883,11 @@ class TestIsIBMProvisioning(DsIdentifyBase):
def test_config_with_new_log(self):
"""A config with a log from this boot is provisioning."""
rootd = self.tmp_dir()
- data = {self.prov_cfg: ("key=value\nkey2=val2\n", -10),
- self.inst_log: ("log data\n", 30),
- self.boot_ref: ("PWD=/", 0)}
+ data = {
+ self.prov_cfg: ("key=value\nkey2=val2\n", -10),
+ self.inst_log: ("log data\n", 30),
+ self.boot_ref: ("PWD=/", 0),
+ }
populate_dir_with_ts(rootd, data)
ret = self.call(rootd=rootd, func=self.funcname)
self.assertEqual(shell_true, ret.rc)
@@ -717,12 +897,12 @@ class TestIsIBMProvisioning(DsIdentifyBase):
class TestOracle(DsIdentifyBase):
def test_found_by_chassis(self):
"""Simple positive test of Oracle by chassis id."""
- self._test_ds_found('Oracle')
+ self._test_ds_found("Oracle")
def test_not_found(self):
"""Simple negative test of Oracle."""
- mycfg = copy.deepcopy(VALID_CFG['Oracle'])
- mycfg['files'][P_CHASSIS_ASSET_TAG] = "Not Oracle"
+ mycfg = copy.deepcopy(VALID_CFG["Oracle"])
+ mycfg["files"][P_CHASSIS_ASSET_TAG] = "Not Oracle"
self._check_via_dict(mycfg, rc=RC_NOT_FOUND)
@@ -739,7 +919,7 @@ def blkid_out(disks=None):
for key in [d for d in disk if d != "DEVNAME"]:
lines.append("%s=%s" % (key, disk[key]))
lines.append("")
- return '\n'.join(lines)
+ return "\n".join(lines)
def geom_out(disks=None):
@@ -756,387 +936,813 @@ def geom_out(disks=None):
disks = []
lines = []
for disk in disks:
- lines.append("%s/%s N/A %s" % (
- disk["TYPE"], disk["LABEL"], disk["DEVNAME"]))
+ lines.append(
+ "%s/%s N/A %s" % (disk["TYPE"], disk["LABEL"], disk["DEVNAME"])
+ )
lines.append("")
- return '\n'.join(lines)
+ return "\n".join(lines)
def _print_run_output(rc, out, err, cfg, files):
"""A helper to print return of TestDsIdentify.
- _print_run_output(self.call())"""
- print('\n'.join([
- '-- rc = %s --' % rc,
- '-- out --', str(out),
- '-- err --', str(err),
- '-- cfg --', util.json_dumps(cfg)]))
- print('-- files --')
+ _print_run_output(self.call())"""
+ print(
+ "\n".join(
+ [
+ "-- rc = %s --" % rc,
+ "-- out --",
+ str(out),
+ "-- err --",
+ str(err),
+ "-- cfg --",
+ util.json_dumps(cfg),
+ ]
+ )
+ )
+ print("-- files --")
for k, v in files.items():
if "/_shwrap" in k:
continue
- print(' === %s ===' % k)
+ print(" === %s ===" % k)
for line in v.splitlines():
print(" " + line)
VALID_CFG = {
- 'AliYun': {
- 'ds': 'AliYun',
- 'files': {P_PRODUCT_NAME: 'Alibaba Cloud ECS\n'},
+ "AliYun": {
+ "ds": "AliYun",
+ "files": {P_PRODUCT_NAME: "Alibaba Cloud ECS\n"},
},
- 'Azure-dmi-detection': {
- 'ds': 'Azure',
- 'files': {
- P_CHASSIS_ASSET_TAG: '7783-7084-3265-9085-8269-3286-77\n',
- }
+ "Azure-dmi-detection": {
+ "ds": "Azure",
+ "files": {
+ P_CHASSIS_ASSET_TAG: "7783-7084-3265-9085-8269-3286-77\n",
+ },
},
- 'Azure-seed-detection': {
- 'ds': 'Azure',
- 'files': {
- P_CHASSIS_ASSET_TAG: 'No-match\n',
- os.path.join(P_SEED_DIR, 'azure', 'ovf-env.xml'): 'present\n',
- }
+ "Azure-seed-detection": {
+ "ds": "Azure",
+ "files": {
+ P_CHASSIS_ASSET_TAG: "No-match\n",
+ os.path.join(P_SEED_DIR, "azure", "ovf-env.xml"): "present\n",
+ },
},
- 'Ec2-hvm': {
- 'ds': 'Ec2',
- 'mocks': [{'name': 'detect_virt', 'RET': 'kvm', 'ret': 0}],
- 'files': {
- P_PRODUCT_SERIAL: 'ec23aef5-54be-4843-8d24-8c819f88453e\n',
- P_PRODUCT_UUID: 'EC23AEF5-54BE-4843-8D24-8C819F88453E\n',
- }
+ "Ec2-hvm": {
+ "ds": "Ec2",
+ "mocks": [{"name": "detect_virt", "RET": "kvm", "ret": 0}],
+ "files": {
+ P_PRODUCT_SERIAL: "ec23aef5-54be-4843-8d24-8c819f88453e\n",
+ P_PRODUCT_UUID: "EC23AEF5-54BE-4843-8D24-8C819F88453E\n",
+ },
},
- 'Ec2-xen': {
- 'ds': 'Ec2',
- 'mocks': [MOCK_VIRT_IS_XEN],
- 'files': {
- 'sys/hypervisor/uuid': 'ec2c6e2f-5fac-4fc7-9c82-74127ec14bbb\n'
+ "Ec2-xen": {
+ "ds": "Ec2",
+ "mocks": [MOCK_VIRT_IS_XEN],
+ "files": {
+ "sys/hypervisor/uuid": "ec2c6e2f-5fac-4fc7-9c82-74127ec14bbb\n"
},
},
- 'Ec2-brightbox': {
- 'ds': 'Ec2',
- 'files': {P_PRODUCT_SERIAL: 'srv-otuxg.gb1.brightbox.com\n'},
+ "Ec2-brightbox": {
+ "ds": "Ec2",
+ "files": {P_PRODUCT_SERIAL: "srv-otuxg.gb1.brightbox.com\n"},
},
- 'Ec2-brightbox-negative': {
- 'ds': 'Ec2',
- 'files': {P_PRODUCT_SERIAL: 'tricky-host.bobrightbox.com\n'},
+ "Ec2-brightbox-negative": {
+ "ds": "Ec2",
+ "files": {P_PRODUCT_SERIAL: "tricky-host.bobrightbox.com\n"},
},
- 'GCE': {
- 'ds': 'GCE',
- 'files': {P_PRODUCT_NAME: 'Google Compute Engine\n'},
- 'mocks': [MOCK_VIRT_IS_KVM],
+ "GCE": {
+ "ds": "GCE",
+ "files": {P_PRODUCT_NAME: "Google Compute Engine\n"},
+ "mocks": [MOCK_VIRT_IS_KVM],
},
- 'GCE-serial': {
- 'ds': 'GCE',
- 'files': {P_PRODUCT_SERIAL: 'GoogleCloud-8f2e88f\n'},
- 'mocks': [MOCK_VIRT_IS_KVM],
+ "GCE-serial": {
+ "ds": "GCE",
+ "files": {P_PRODUCT_SERIAL: "GoogleCloud-8f2e88f\n"},
+ "mocks": [MOCK_VIRT_IS_KVM],
},
- 'NoCloud': {
- 'ds': 'NoCloud',
- 'mocks': [
+ "NoCloud": {
+ "ds": "NoCloud",
+ "mocks": [
MOCK_VIRT_IS_KVM,
- {'name': 'blkid', 'ret': 0,
- 'out': blkid_out(
- BLKID_UEFI_UBUNTU +
- [{'DEVNAME': 'vdb', 'TYPE': 'iso9660', 'LABEL': 'cidata'}])},
+ {
+ "name": "blkid",
+ "ret": 0,
+ "out": blkid_out(
+ BLKID_UEFI_UBUNTU
+ + [
+ {
+ "DEVNAME": "vdb",
+ "TYPE": "iso9660",
+ "LABEL": "cidata",
+ }
+ ]
+ ),
+ },
],
- 'files': {
- 'dev/vdb': 'pretend iso content for cidata\n',
- }
+ "files": {
+ "dev/vdb": "pretend iso content for cidata\n",
+ },
},
- 'NoCloud-fbsd': {
- 'ds': 'NoCloud',
- 'mocks': [
+ "NoCloud-fbsd": {
+ "ds": "NoCloud",
+ "mocks": [
MOCK_VIRT_IS_KVM,
MOCK_UNAME_IS_FREEBSD,
- {'name': 'geom', 'ret': 0,
- 'out': geom_out(
- [{'DEVNAME': 'vtbd', 'TYPE': 'iso9660', 'LABEL': 'cidata'}])},
+ {
+ "name": "geom",
+ "ret": 0,
+ "out": geom_out(
+ [{"DEVNAME": "vtbd", "TYPE": "iso9660", "LABEL": "cidata"}]
+ ),
+ },
],
- 'files': {
- '/dev/vtdb': 'pretend iso content for cidata\n',
- }
+ "files": {
+ "/dev/vtdb": "pretend iso content for cidata\n",
+ },
},
- 'NoCloudUpper': {
- 'ds': 'NoCloud',
- 'mocks': [
+ "NoCloudUpper": {
+ "ds": "NoCloud",
+ "mocks": [
MOCK_VIRT_IS_KVM,
- {'name': 'blkid', 'ret': 0,
- 'out': blkid_out(
- BLKID_UEFI_UBUNTU +
- [{'DEVNAME': 'vdb', 'TYPE': 'iso9660', 'LABEL': 'CIDATA'}])},
+ {
+ "name": "blkid",
+ "ret": 0,
+ "out": blkid_out(
+ BLKID_UEFI_UBUNTU
+ + [
+ {
+ "DEVNAME": "vdb",
+ "TYPE": "iso9660",
+ "LABEL": "CIDATA",
+ }
+ ]
+ ),
+ },
],
- 'files': {
- 'dev/vdb': 'pretend iso content for cidata\n',
- }
+ "files": {
+ "dev/vdb": "pretend iso content for cidata\n",
+ },
},
- 'NoCloud-fatboot': {
- 'ds': 'NoCloud',
- 'mocks': [
+ "NoCloud-fatboot": {
+ "ds": "NoCloud",
+ "mocks": [
MOCK_VIRT_IS_XEN,
- {'name': 'blkid', 'ret': 0,
- 'out': blkid_out(
- BLKID_UEFI_UBUNTU +
- [{'DEVNAME': 'xvdb', 'TYPE': 'vfat', 'SEC_TYPE': 'msdos',
- 'UUID': '355a-4FC2', 'LABEL_FATBOOT': 'cidata'}])},
+ {
+ "name": "blkid",
+ "ret": 0,
+ "out": blkid_out(
+ BLKID_UEFI_UBUNTU
+ + [
+ {
+ "DEVNAME": "xvdb",
+ "TYPE": "vfat",
+ "SEC_TYPE": "msdos",
+ "UUID": "355a-4FC2",
+ "LABEL_FATBOOT": "cidata",
+ }
+ ]
+ ),
+ },
],
- 'files': {
- 'dev/vdb': 'pretend iso content for cidata\n',
- }
+ "files": {
+ "dev/vdb": "pretend iso content for cidata\n",
+ },
},
- 'NoCloud-seed': {
- 'ds': 'NoCloud',
- 'files': {
- os.path.join(P_SEED_DIR, 'nocloud', 'user-data'): 'ud\n',
- os.path.join(P_SEED_DIR, 'nocloud', 'meta-data'): 'md\n',
- }
+ "NoCloud-seed": {
+ "ds": "NoCloud",
+ "files": {
+ os.path.join(P_SEED_DIR, "nocloud", "user-data"): "ud\n",
+ os.path.join(P_SEED_DIR, "nocloud", "meta-data"): "md\n",
+ },
},
- 'NoCloud-seed-ubuntu-core': {
- 'ds': 'NoCloud',
- 'files': {
- os.path.join('writable/system-data', P_SEED_DIR,
- 'nocloud-net', 'user-data'): 'ud\n',
- os.path.join('writable/system-data', P_SEED_DIR,
- 'nocloud-net', 'meta-data'): 'md\n',
- }
+ "NoCloud-seed-ubuntu-core": {
+ "ds": "NoCloud",
+ "files": {
+ os.path.join(
+ "writable/system-data", P_SEED_DIR, "nocloud-net", "user-data"
+ ): "ud\n",
+ os.path.join(
+ "writable/system-data", P_SEED_DIR, "nocloud-net", "meta-data"
+ ): "md\n",
+ },
},
- 'OpenStack': {
- 'ds': 'OpenStack',
- 'files': {P_PRODUCT_NAME: 'OpenStack Nova\n'},
- 'mocks': [MOCK_VIRT_IS_KVM],
- 'policy_dmi': POLICY_FOUND_ONLY,
- 'policy_no_dmi': POLICY_FOUND_ONLY,
+ "OpenStack": {
+ "ds": "OpenStack",
+ "files": {P_PRODUCT_NAME: "OpenStack Nova\n"},
+ "mocks": [MOCK_VIRT_IS_KVM],
+ "policy_dmi": POLICY_FOUND_ONLY,
+ "policy_no_dmi": POLICY_FOUND_ONLY,
},
- 'OpenStack-OpenTelekom': {
+ "OpenStack-OpenTelekom": {
# OTC gen1 (Xen) hosts use OpenStack datasource, LP: #1756471
- 'ds': 'OpenStack',
- 'files': {P_CHASSIS_ASSET_TAG: 'OpenTelekomCloud\n'},
- 'mocks': [MOCK_VIRT_IS_XEN],
+ "ds": "OpenStack",
+ "files": {P_CHASSIS_ASSET_TAG: "OpenTelekomCloud\n"},
+ "mocks": [MOCK_VIRT_IS_XEN],
},
- 'OpenStack-SAPCCloud': {
+ "OpenStack-SAPCCloud": {
# SAP CCloud hosts use OpenStack on VMware
- 'ds': 'OpenStack',
- 'files': {P_CHASSIS_ASSET_TAG: 'SAP CCloud VM\n'},
- 'mocks': [MOCK_VIRT_IS_VMWARE],
+ "ds": "OpenStack",
+ "files": {P_CHASSIS_ASSET_TAG: "SAP CCloud VM\n"},
+ "mocks": [MOCK_VIRT_IS_VMWARE],
},
- 'OpenStack-AssetTag-Nova': {
+ "OpenStack-AssetTag-Nova": {
# VMware vSphere can't modify product-name, LP: #1669875
- 'ds': 'OpenStack',
- 'files': {P_CHASSIS_ASSET_TAG: 'OpenStack Nova\n'},
- 'mocks': [MOCK_VIRT_IS_XEN],
+ "ds": "OpenStack",
+ "files": {P_CHASSIS_ASSET_TAG: "OpenStack Nova\n"},
+ "mocks": [MOCK_VIRT_IS_XEN],
},
- 'OpenStack-AssetTag-Compute': {
+ "OpenStack-AssetTag-Compute": {
# VMware vSphere can't modify product-name, LP: #1669875
- 'ds': 'OpenStack',
- 'files': {P_CHASSIS_ASSET_TAG: 'OpenStack Compute\n'},
- 'mocks': [MOCK_VIRT_IS_XEN],
+ "ds": "OpenStack",
+ "files": {P_CHASSIS_ASSET_TAG: "OpenStack Compute\n"},
+ "mocks": [MOCK_VIRT_IS_XEN],
},
- 'OVF-seed': {
- 'ds': 'OVF',
- 'files': {
- os.path.join(P_SEED_DIR, 'ovf', 'ovf-env.xml'): 'present\n',
- }
+ "OVF-seed": {
+ "ds": "OVF",
+ "files": {
+ os.path.join(P_SEED_DIR, "ovf", "ovf-env.xml"): "present\n",
+ },
},
- 'OVF-vmware-customization': {
- 'ds': 'OVF',
- 'mocks': [
+ "OVF-vmware-customization": {
+ "ds": "OVF",
+ "mocks": [
# Include a mocked iso9660 potential, even though content is not ovf
- {'name': 'blkid', 'ret': 0,
- 'out': blkid_out(
- [{'DEVNAME': 'sr0', 'TYPE': 'iso9660', 'LABEL': ''}])
- },
+ {
+ "name": "blkid",
+ "ret": 0,
+ "out": blkid_out(
+ [{"DEVNAME": "sr0", "TYPE": "iso9660", "LABEL": ""}]
+ ),
+ },
MOCK_VIRT_IS_VMWARE,
],
- 'files': {
- 'dev/sr0': 'no match',
+ "files": {
+ "dev/sr0": "no match",
# Setup vmware customization enabled
- 'usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so': 'here',
- 'etc/cloud/cloud.cfg': 'disable_vmware_customization: false\n',
- }
+ "usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so": "here",
+ "etc/cloud/cloud.cfg": "disable_vmware_customization: false\n",
+ },
},
- 'OVF': {
- 'ds': 'OVF',
- 'mocks': [
- {'name': 'blkid', 'ret': 0,
- 'out': blkid_out(
- [{'DEVNAME': 'sr0', 'TYPE': 'iso9660', 'LABEL': ''},
- {'DEVNAME': 'sr1', 'TYPE': 'iso9660', 'LABEL': 'ignoreme'},
- {'DEVNAME': 'vda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()}]),
- },
+ "OVF": {
+ "ds": "OVF",
+ "mocks": [
+ {
+ "name": "blkid",
+ "ret": 0,
+ "out": blkid_out(
+ [
+ {"DEVNAME": "sr0", "TYPE": "iso9660", "LABEL": ""},
+ {
+ "DEVNAME": "sr1",
+ "TYPE": "iso9660",
+ "LABEL": "ignoreme",
+ },
+ {
+ "DEVNAME": "vda1",
+ "TYPE": "vfat",
+ "PARTUUID": uuid4(),
+ },
+ ]
+ ),
+ },
MOCK_VIRT_IS_VMWARE,
],
- 'files': {
- 'dev/sr0': 'pretend ovf iso has ' + OVF_MATCH_STRING + '\n',
- 'sys/class/block/sr0/size': '2048\n',
- }
+ "files": {
+ "dev/sr0": "pretend ovf iso has " + OVF_MATCH_STRING + "\n",
+ "sys/class/block/sr0/size": "2048\n",
+ },
},
- 'OVF-guestinfo': {
- 'ds': 'OVF',
- 'mocks': [
- {'name': 'ovf_vmware_transport_guestinfo', 'ret': 0,
- 'out': '<?xml version="1.0" encoding="UTF-8"?>\n<Environment'},
+ "OVF-guestinfo": {
+ "ds": "OVF",
+ "mocks": [
+ {
+ "name": "ovf_vmware_transport_guestinfo",
+ "ret": 0,
+ "out": '<?xml version="1.0" encoding="UTF-8"?>\n<Environment',
+ },
MOCK_VIRT_IS_VMWARE,
],
},
- 'ConfigDrive': {
- 'ds': 'ConfigDrive',
- 'mocks': [
- {'name': 'blkid', 'ret': 0,
- 'out': blkid_out(
- [{'DEVNAME': 'vda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()},
- {'DEVNAME': 'vda2', 'TYPE': 'ext4',
- 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4()},
- {'DEVNAME': 'vdb', 'TYPE': 'vfat', 'LABEL': 'config-2'}])
- },
+ "ConfigDrive": {
+ "ds": "ConfigDrive",
+ "mocks": [
+ {
+ "name": "blkid",
+ "ret": 0,
+ "out": blkid_out(
+ [
+ {
+ "DEVNAME": "vda1",
+ "TYPE": "vfat",
+ "PARTUUID": uuid4(),
+ },
+ {
+ "DEVNAME": "vda2",
+ "TYPE": "ext4",
+ "LABEL": "cloudimg-rootfs",
+ "PARTUUID": uuid4(),
+ },
+ {
+ "DEVNAME": "vdb",
+ "TYPE": "vfat",
+ "LABEL": "config-2",
+ },
+ ]
+ ),
+ },
],
},
- 'ConfigDriveUpper': {
- 'ds': 'ConfigDrive',
- 'mocks': [
- {'name': 'blkid', 'ret': 0,
- 'out': blkid_out(
- [{'DEVNAME': 'vda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()},
- {'DEVNAME': 'vda2', 'TYPE': 'ext4',
- 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4()},
- {'DEVNAME': 'vdb', 'TYPE': 'vfat', 'LABEL': 'CONFIG-2'}])
- },
+ "ConfigDriveUpper": {
+ "ds": "ConfigDrive",
+ "mocks": [
+ {
+ "name": "blkid",
+ "ret": 0,
+ "out": blkid_out(
+ [
+ {
+ "DEVNAME": "vda1",
+ "TYPE": "vfat",
+ "PARTUUID": uuid4(),
+ },
+ {
+ "DEVNAME": "vda2",
+ "TYPE": "ext4",
+ "LABEL": "cloudimg-rootfs",
+ "PARTUUID": uuid4(),
+ },
+ {
+ "DEVNAME": "vdb",
+ "TYPE": "vfat",
+ "LABEL": "CONFIG-2",
+ },
+ ]
+ ),
+ },
],
},
- 'ConfigDrive-seed': {
- 'ds': 'ConfigDrive',
- 'files': {
- os.path.join(P_SEED_DIR, 'config_drive', 'openstack',
- 'latest', 'meta_data.json'): 'md\n'},
+ "ConfigDrive-seed": {
+ "ds": "ConfigDrive",
+ "files": {
+ os.path.join(
+ P_SEED_DIR,
+ "config_drive",
+ "openstack",
+ "latest",
+ "meta_data.json",
+ ): "md\n"
+ },
},
- 'RbxCloud': {
- 'ds': 'RbxCloud',
- 'mocks': [
- {'name': 'blkid', 'ret': 0,
- 'out': blkid_out(
- [{'DEVNAME': 'vda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()},
- {'DEVNAME': 'vda2', 'TYPE': 'ext4',
- 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4()},
- {'DEVNAME': 'vdb', 'TYPE': 'vfat', 'LABEL': 'CLOUDMD'}]
- )},
+ "RbxCloud": {
+ "ds": "RbxCloud",
+ "mocks": [
+ {
+ "name": "blkid",
+ "ret": 0,
+ "out": blkid_out(
+ [
+ {
+ "DEVNAME": "vda1",
+ "TYPE": "vfat",
+ "PARTUUID": uuid4(),
+ },
+ {
+ "DEVNAME": "vda2",
+ "TYPE": "ext4",
+ "LABEL": "cloudimg-rootfs",
+ "PARTUUID": uuid4(),
+ },
+ {"DEVNAME": "vdb", "TYPE": "vfat", "LABEL": "CLOUDMD"},
+ ]
+ ),
+ },
],
},
- 'RbxCloudLower': {
- 'ds': 'RbxCloud',
- 'mocks': [
- {'name': 'blkid', 'ret': 0,
- 'out': blkid_out(
- [{'DEVNAME': 'vda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()},
- {'DEVNAME': 'vda2', 'TYPE': 'ext4',
- 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4()},
- {'DEVNAME': 'vdb', 'TYPE': 'vfat', 'LABEL': 'cloudmd'}]
- )},
+ "RbxCloudLower": {
+ "ds": "RbxCloud",
+ "mocks": [
+ {
+ "name": "blkid",
+ "ret": 0,
+ "out": blkid_out(
+ [
+ {
+ "DEVNAME": "vda1",
+ "TYPE": "vfat",
+ "PARTUUID": uuid4(),
+ },
+ {
+ "DEVNAME": "vda2",
+ "TYPE": "ext4",
+ "LABEL": "cloudimg-rootfs",
+ "PARTUUID": uuid4(),
+ },
+ {"DEVNAME": "vdb", "TYPE": "vfat", "LABEL": "cloudmd"},
+ ]
+ ),
+ },
],
},
- 'Hetzner': {
- 'ds': 'Hetzner',
- 'files': {P_SYS_VENDOR: 'Hetzner\n'},
+ "Hetzner": {
+ "ds": "Hetzner",
+ "files": {P_SYS_VENDOR: "Hetzner\n"},
},
- 'Hetzner-kenv': {
- 'ds': 'Hetzner',
- 'mocks': [
+ "Hetzner-kenv": {
+ "ds": "Hetzner",
+ "mocks": [
MOCK_UNAME_IS_FREEBSD,
- {'name': 'get_kenv_field', 'ret': 0, 'RET': 'Hetzner'}
+ {"name": "get_kenv_field", "ret": 0, "RET": "Hetzner"},
],
},
- 'Hetzner-dmidecode': {
- 'ds': 'Hetzner',
- 'mocks': [
- {'name': 'dmi_decode', 'ret': 0, 'RET': 'Hetzner'}
- ],
+ "Hetzner-dmidecode": {
+ "ds": "Hetzner",
+ "mocks": [{"name": "dmi_decode", "ret": 0, "RET": "Hetzner"}],
},
- 'IBMCloud-metadata': {
- 'ds': 'IBMCloud',
- 'mocks': [
+ "IBMCloud-metadata": {
+ "ds": "IBMCloud",
+ "mocks": [
MOCK_VIRT_IS_XEN,
- {'name': 'is_ibm_provisioning', 'ret': shell_false},
- {'name': 'blkid', 'ret': 0,
- 'out': blkid_out(
- [{'DEVNAME': 'xvda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()},
- {'DEVNAME': 'xvda2', 'TYPE': 'ext4',
- 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4()},
- {'DEVNAME': 'xvdb', 'TYPE': 'vfat', 'LABEL': 'METADATA'}]),
- },
+ {"name": "is_ibm_provisioning", "ret": shell_false},
+ {
+ "name": "blkid",
+ "ret": 0,
+ "out": blkid_out(
+ [
+ {
+ "DEVNAME": "xvda1",
+ "TYPE": "vfat",
+ "PARTUUID": uuid4(),
+ },
+ {
+ "DEVNAME": "xvda2",
+ "TYPE": "ext4",
+ "LABEL": "cloudimg-rootfs",
+ "PARTUUID": uuid4(),
+ },
+ {
+ "DEVNAME": "xvdb",
+ "TYPE": "vfat",
+ "LABEL": "METADATA",
+ },
+ ]
+ ),
+ },
],
},
- 'IBMCloud-config-2': {
- 'ds': 'IBMCloud',
- 'mocks': [
+ "IBMCloud-config-2": {
+ "ds": "IBMCloud",
+ "mocks": [
MOCK_VIRT_IS_XEN,
- {'name': 'is_ibm_provisioning', 'ret': shell_false},
- {'name': 'blkid', 'ret': 0,
- 'out': blkid_out(
- [{'DEVNAME': 'xvda1', 'TYPE': 'ext3', 'PARTUUID': uuid4(),
- 'UUID': uuid4(), 'LABEL': 'cloudimg-bootfs'},
- {'DEVNAME': 'xvdb', 'TYPE': 'vfat', 'LABEL': 'config-2',
- 'UUID': ds_ibm.IBM_CONFIG_UUID},
- {'DEVNAME': 'xvda2', 'TYPE': 'ext4',
- 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4(),
- 'UUID': uuid4()},
- ]),
- },
+ {"name": "is_ibm_provisioning", "ret": shell_false},
+ {
+ "name": "blkid",
+ "ret": 0,
+ "out": blkid_out(
+ [
+ {
+ "DEVNAME": "xvda1",
+ "TYPE": "ext3",
+ "PARTUUID": uuid4(),
+ "UUID": uuid4(),
+ "LABEL": "cloudimg-bootfs",
+ },
+ {
+ "DEVNAME": "xvdb",
+ "TYPE": "vfat",
+ "LABEL": "config-2",
+ "UUID": ds_ibm.IBM_CONFIG_UUID,
+ },
+ {
+ "DEVNAME": "xvda2",
+ "TYPE": "ext4",
+ "LABEL": "cloudimg-rootfs",
+ "PARTUUID": uuid4(),
+ "UUID": uuid4(),
+ },
+ ]
+ ),
+ },
],
},
- 'IBMCloud-nodisks': {
- 'ds': 'IBMCloud',
- 'mocks': [
+ "IBMCloud-nodisks": {
+ "ds": "IBMCloud",
+ "mocks": [
MOCK_VIRT_IS_XEN,
- {'name': 'is_ibm_provisioning', 'ret': shell_false},
- {'name': 'blkid', 'ret': 0,
- 'out': blkid_out(
- [{'DEVNAME': 'xvda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()},
- {'DEVNAME': 'xvda2', 'TYPE': 'ext4',
- 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4()}]),
- },
+ {"name": "is_ibm_provisioning", "ret": shell_false},
+ {
+ "name": "blkid",
+ "ret": 0,
+ "out": blkid_out(
+ [
+ {
+ "DEVNAME": "xvda1",
+ "TYPE": "vfat",
+ "PARTUUID": uuid4(),
+ },
+ {
+ "DEVNAME": "xvda2",
+ "TYPE": "ext4",
+ "LABEL": "cloudimg-rootfs",
+ "PARTUUID": uuid4(),
+ },
+ ]
+ ),
+ },
],
},
- 'Oracle': {
- 'ds': 'Oracle',
- 'files': {
- P_CHASSIS_ASSET_TAG: ds_oracle.CHASSIS_ASSET_TAG + '\n',
- }
+ "Oracle": {
+ "ds": "Oracle",
+ "files": {
+ P_CHASSIS_ASSET_TAG: ds_oracle.CHASSIS_ASSET_TAG + "\n",
+ },
},
- 'SmartOS-bhyve': {
- 'ds': 'SmartOS',
- 'mocks': [
+ "SmartOS-bhyve": {
+ "ds": "SmartOS",
+ "mocks": [
MOCK_VIRT_IS_VM_OTHER,
- {'name': 'blkid', 'ret': 0,
- 'out': blkid_out(
- [{'DEVNAME': 'vda1', 'TYPE': 'ext4',
- 'PARTUUID': '49ec635a-01'},
- {'DEVNAME': 'vda2', 'TYPE': 'swap',
- 'LABEL': 'cloudimg-swap', 'PARTUUID': '49ec635a-02'}]),
- },
+ {
+ "name": "blkid",
+ "ret": 0,
+ "out": blkid_out(
+ [
+ {
+ "DEVNAME": "vda1",
+ "TYPE": "ext4",
+ "PARTUUID": "49ec635a-01",
+ },
+ {
+ "DEVNAME": "vda2",
+ "TYPE": "swap",
+ "LABEL": "cloudimg-swap",
+ "PARTUUID": "49ec635a-02",
+ },
+ ]
+ ),
+ },
],
- 'files': {P_PRODUCT_NAME: 'SmartDC HVM\n'},
+ "files": {P_PRODUCT_NAME: "SmartDC HVM\n"},
},
- 'SmartOS-lxbrand': {
- 'ds': 'SmartOS',
- 'mocks': [
+ "SmartOS-lxbrand": {
+ "ds": "SmartOS",
+ "mocks": [
MOCK_VIRT_IS_CONTAINER_OTHER,
- {'name': 'uname', 'ret': 0,
- 'out': ("Linux d43da87a-daca-60e8-e6d4-d2ed372662a3 4.3.0 "
- "BrandZ virtual linux x86_64 GNU/Linux")},
- {'name': 'blkid', 'ret': 2, 'out': ''},
+ {
+ "name": "uname",
+ "ret": 0,
+ "out": (
+ "Linux d43da87a-daca-60e8-e6d4-d2ed372662a3 4.3.0 "
+ "BrandZ virtual linux x86_64 GNU/Linux"
+ ),
+ },
+ {"name": "blkid", "ret": 2, "out": ""},
+ ],
+ "files": {ds_smartos.METADATA_SOCKFILE: "would be a socket\n"},
+ },
+ "Ec2-ZStack": {
+ "ds": "Ec2",
+ "files": {P_CHASSIS_ASSET_TAG: "123456.zstack.io\n"},
+ },
+ "Ec2-E24Cloud": {
+ "ds": "Ec2",
+ "files": {P_SYS_VENDOR: "e24cloud\n"},
+ },
+ "Ec2-E24Cloud-negative": {
+ "ds": "Ec2",
+ "files": {P_SYS_VENDOR: "e24cloudyday\n"},
+ },
+ "VMware-NoValidTransports": {
+ "ds": "VMware",
+ "mocks": [
+ MOCK_VIRT_IS_VMWARE,
+ ],
+ },
+ "VMware-EnvVar-NoData": {
+ "ds": "VMware",
+ "mocks": [
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo",
+ "ret": 0,
+ },
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo_metadata",
+ "ret": 1,
+ },
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo_userdata",
+ "ret": 1,
+ },
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo_vendordata",
+ "ret": 1,
+ },
+ MOCK_VIRT_IS_VMWARE,
],
- 'files': {ds_smartos.METADATA_SOCKFILE: 'would be a socket\n'},
},
- 'Ec2-ZStack': {
- 'ds': 'Ec2',
- 'files': {P_CHASSIS_ASSET_TAG: '123456.zstack.io\n'},
+ "VMware-EnvVar-NoVirtID": {
+ "ds": "VMware",
+ "mocks": [
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo",
+ "ret": 0,
+ },
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo_metadata",
+ "ret": 0,
+ },
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo_userdata",
+ "ret": 1,
+ },
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo_vendordata",
+ "ret": 1,
+ },
+ ],
+ },
+ "VMware-EnvVar-Metadata": {
+ "ds": "VMware",
+ "mocks": [
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo",
+ "ret": 0,
+ },
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo_metadata",
+ "ret": 0,
+ },
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo_userdata",
+ "ret": 1,
+ },
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo_vendordata",
+ "ret": 1,
+ },
+ MOCK_VIRT_IS_VMWARE,
+ ],
+ },
+ "VMware-EnvVar-Userdata": {
+ "ds": "VMware",
+ "mocks": [
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo",
+ "ret": 0,
+ },
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo_metadata",
+ "ret": 1,
+ },
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo_userdata",
+ "ret": 0,
+ },
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo_vendordata",
+ "ret": 1,
+ },
+ MOCK_VIRT_IS_VMWARE,
+ ],
+ },
+ "VMware-EnvVar-Vendordata": {
+ "ds": "VMware",
+ "mocks": [
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo",
+ "ret": 0,
+ },
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo_metadata",
+ "ret": 1,
+ },
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo_userdata",
+ "ret": 1,
+ },
+ {
+ "name": "vmware_has_envvar_vmx_guestinfo_vendordata",
+ "ret": 0,
+ },
+ MOCK_VIRT_IS_VMWARE,
+ ],
},
- 'Ec2-E24Cloud': {
- 'ds': 'Ec2',
- 'files': {P_SYS_VENDOR: 'e24cloud\n'},
+ "VMware-GuestInfo-NoData": {
+ "ds": "VMware",
+ "mocks": [
+ {
+ "name": "vmware_has_rpctool",
+ "ret": 0,
+ "out": "/usr/bin/vmware-rpctool",
+ },
+ {
+ "name": "vmware_rpctool_guestinfo_metadata",
+ "ret": 1,
+ },
+ {
+ "name": "vmware_rpctool_guestinfo_userdata",
+ "ret": 1,
+ },
+ {
+ "name": "vmware_rpctool_guestinfo_vendordata",
+ "ret": 1,
+ },
+ MOCK_VIRT_IS_VMWARE,
+ ],
+ },
+ "VMware-GuestInfo-NoVirtID": {
+ "ds": "VMware",
+ "mocks": [
+ {
+ "name": "vmware_has_rpctool",
+ "ret": 0,
+ "out": "/usr/bin/vmware-rpctool",
+ },
+ {
+ "name": "vmware_rpctool_guestinfo_metadata",
+ "ret": 0,
+ "out": "---",
+ },
+ {
+ "name": "vmware_rpctool_guestinfo_userdata",
+ "ret": 1,
+ },
+ {
+ "name": "vmware_rpctool_guestinfo_vendordata",
+ "ret": 1,
+ },
+ ],
+ },
+ "VMware-GuestInfo-Metadata": {
+ "ds": "VMware",
+ "mocks": [
+ {
+ "name": "vmware_has_rpctool",
+ "ret": 0,
+ "out": "/usr/bin/vmware-rpctool",
+ },
+ {
+ "name": "vmware_rpctool_guestinfo_metadata",
+ "ret": 0,
+ "out": "---",
+ },
+ {
+ "name": "vmware_rpctool_guestinfo_userdata",
+ "ret": 1,
+ },
+ {
+ "name": "vmware_rpctool_guestinfo_vendordata",
+ "ret": 1,
+ },
+ MOCK_VIRT_IS_VMWARE,
+ ],
+ },
+ "VMware-GuestInfo-Userdata": {
+ "ds": "VMware",
+ "mocks": [
+ {
+ "name": "vmware_has_rpctool",
+ "ret": 0,
+ "out": "/usr/bin/vmware-rpctool",
+ },
+ {
+ "name": "vmware_rpctool_guestinfo_metadata",
+ "ret": 1,
+ },
+ {
+ "name": "vmware_rpctool_guestinfo_userdata",
+ "ret": 0,
+ "out": "---",
+ },
+ {
+ "name": "vmware_rpctool_guestinfo_vendordata",
+ "ret": 1,
+ },
+ MOCK_VIRT_IS_VMWARE,
+ ],
+ },
+ "VMware-GuestInfo-Vendordata": {
+ "ds": "VMware",
+ "mocks": [
+ {
+ "name": "vmware_has_rpctool",
+ "ret": 0,
+ "out": "/usr/bin/vmware-rpctool",
+ },
+ {
+ "name": "vmware_rpctool_guestinfo_metadata",
+ "ret": 1,
+ },
+ {
+ "name": "vmware_rpctool_guestinfo_userdata",
+ "ret": 1,
+ },
+ {
+ "name": "vmware_rpctool_guestinfo_vendordata",
+ "ret": 0,
+ "out": "---",
+ },
+ MOCK_VIRT_IS_VMWARE,
+ ],
},
- 'Ec2-E24Cloud-negative': {
- 'ds': 'Ec2',
- 'files': {P_SYS_VENDOR: 'e24cloudyday\n'},
- }
}
# vi: ts=4 expandtab
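A minimal standalone sketch of the mock-merging behavior shown in the _check_via_dict hunk near the top of this diff, assuming only that each mock entry is a dict keyed by a unique "name"; merge_mocks() below is a hypothetical illustration, not a function in the cloud-init test module.

def merge_mocks(case_mocks, default_mocks):
    """Return case_mocks plus any default whose name is not already mocked."""
    merged = list(case_mocks)
    mocked_names = {m["name"] for m in merged}
    for default in default_mocks:
        if default["name"] not in mocked_names:
            merged.append(default)
    return merged

# Example: a case overriding "detect_virt" keeps its override and still
# inherits the remaining defaults (mock names here mirror ones used in
# this file; the default list itself is assumed for illustration).
defaults = [{"name": "detect_virt", "RET": "none", "ret": 0},
            {"name": "blkid", "ret": 2, "out": ""}]
case = [{"name": "detect_virt", "RET": "kvm", "ret": 0}]
merged = merge_mocks(case, defaults)
assert [m["name"] for m in merged] == ["detect_virt", "blkid"]
assert merged[0]["RET"] == "kvm"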
diff --git a/tests/unittests/test_ec2_util.py b/tests/unittests/test_ec2_util.py
index 3f50f57d..f447d295 100644
--- a/tests/unittests/test_ec2_util.py
+++ b/tests/unittests/test_ec2_util.py
@@ -2,178 +2,276 @@
import httpretty as hp
-from cloudinit.tests import helpers
-
from cloudinit import ec2_utils as eu
from cloudinit import url_helper as uh
+from tests.unittests import helpers
class TestEc2Util(helpers.HttprettyTestCase):
- VERSION = 'latest'
+ VERSION = "latest"
def test_userdata_fetch(self):
- hp.register_uri(hp.GET,
- 'http://169.254.169.254/%s/user-data' % (self.VERSION),
- body='stuff',
- status=200)
+ hp.register_uri(
+ hp.GET,
+ "http://169.254.169.254/%s/user-data" % (self.VERSION),
+ body="stuff",
+ status=200,
+ )
userdata = eu.get_instance_userdata(self.VERSION)
- self.assertEqual('stuff', userdata.decode('utf-8'))
+ self.assertEqual("stuff", userdata.decode("utf-8"))
def test_userdata_fetch_fail_not_found(self):
- hp.register_uri(hp.GET,
- 'http://169.254.169.254/%s/user-data' % (self.VERSION),
- status=404)
+ hp.register_uri(
+ hp.GET,
+ "http://169.254.169.254/%s/user-data" % (self.VERSION),
+ status=404,
+ )
userdata = eu.get_instance_userdata(self.VERSION, retries=0)
- self.assertEqual('', userdata)
+ self.assertEqual("", userdata)
def test_userdata_fetch_fail_server_dead(self):
- hp.register_uri(hp.GET,
- 'http://169.254.169.254/%s/user-data' % (self.VERSION),
- status=500)
+ hp.register_uri(
+ hp.GET,
+ "http://169.254.169.254/%s/user-data" % (self.VERSION),
+ status=500,
+ )
userdata = eu.get_instance_userdata(self.VERSION, retries=0)
- self.assertEqual('', userdata)
+ self.assertEqual("", userdata)
def test_userdata_fetch_fail_server_not_found(self):
- hp.register_uri(hp.GET,
- 'http://169.254.169.254/%s/user-data' % (self.VERSION),
- status=404)
+ hp.register_uri(
+ hp.GET,
+ "http://169.254.169.254/%s/user-data" % (self.VERSION),
+ status=404,
+ )
userdata = eu.get_instance_userdata(self.VERSION)
- self.assertEqual('', userdata)
+ self.assertEqual("", userdata)
def test_metadata_fetch_no_keys(self):
- base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION)
- hp.register_uri(hp.GET, base_url, status=200,
- body="\n".join(['hostname',
- 'instance-id',
- 'ami-launch-index']))
- hp.register_uri(hp.GET, uh.combine_url(base_url, 'hostname'),
- status=200, body='ec2.fake.host.name.com')
- hp.register_uri(hp.GET, uh.combine_url(base_url, 'instance-id'),
- status=200, body='123')
- hp.register_uri(hp.GET, uh.combine_url(base_url, 'ami-launch-index'),
- status=200, body='1')
+ base_url = "http://169.254.169.254/%s/meta-data/" % (self.VERSION)
+ hp.register_uri(
+ hp.GET,
+ base_url,
+ status=200,
+ body="\n".join(["hostname", "instance-id", "ami-launch-index"]),
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "hostname"),
+ status=200,
+ body="ec2.fake.host.name.com",
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "instance-id"),
+ status=200,
+ body="123",
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "ami-launch-index"),
+ status=200,
+ body="1",
+ )
md = eu.get_instance_metadata(self.VERSION, retries=0)
- self.assertEqual(md['hostname'], 'ec2.fake.host.name.com')
- self.assertEqual(md['instance-id'], '123')
- self.assertEqual(md['ami-launch-index'], '1')
+ self.assertEqual(md["hostname"], "ec2.fake.host.name.com")
+ self.assertEqual(md["instance-id"], "123")
+ self.assertEqual(md["ami-launch-index"], "1")
def test_metadata_fetch_key(self):
- base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION)
- hp.register_uri(hp.GET, base_url, status=200,
- body="\n".join(['hostname',
- 'instance-id',
- 'public-keys/']))
- hp.register_uri(hp.GET, uh.combine_url(base_url, 'hostname'),
- status=200, body='ec2.fake.host.name.com')
- hp.register_uri(hp.GET, uh.combine_url(base_url, 'instance-id'),
- status=200, body='123')
- hp.register_uri(hp.GET, uh.combine_url(base_url, 'public-keys/'),
- status=200, body='0=my-public-key')
- hp.register_uri(hp.GET,
- uh.combine_url(base_url, 'public-keys/0/openssh-key'),
- status=200, body='ssh-rsa AAAA.....wZEf my-public-key')
+ base_url = "http://169.254.169.254/%s/meta-data/" % (self.VERSION)
+ hp.register_uri(
+ hp.GET,
+ base_url,
+ status=200,
+ body="\n".join(["hostname", "instance-id", "public-keys/"]),
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "hostname"),
+ status=200,
+ body="ec2.fake.host.name.com",
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "instance-id"),
+ status=200,
+ body="123",
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "public-keys/"),
+ status=200,
+ body="0=my-public-key",
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "public-keys/0/openssh-key"),
+ status=200,
+ body="ssh-rsa AAAA.....wZEf my-public-key",
+ )
md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1)
- self.assertEqual(md['hostname'], 'ec2.fake.host.name.com')
- self.assertEqual(md['instance-id'], '123')
- self.assertEqual(1, len(md['public-keys']))
+ self.assertEqual(md["hostname"], "ec2.fake.host.name.com")
+ self.assertEqual(md["instance-id"], "123")
+ self.assertEqual(1, len(md["public-keys"]))
def test_metadata_fetch_with_2_keys(self):
- base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION)
- hp.register_uri(hp.GET, base_url, status=200,
- body="\n".join(['hostname',
- 'instance-id',
- 'public-keys/']))
- hp.register_uri(hp.GET, uh.combine_url(base_url, 'hostname'),
- status=200, body='ec2.fake.host.name.com')
- hp.register_uri(hp.GET, uh.combine_url(base_url, 'instance-id'),
- status=200, body='123')
- hp.register_uri(hp.GET, uh.combine_url(base_url, 'public-keys/'),
- status=200,
- body="\n".join(['0=my-public-key', '1=my-other-key']))
- hp.register_uri(hp.GET,
- uh.combine_url(base_url, 'public-keys/0/openssh-key'),
- status=200, body='ssh-rsa AAAA.....wZEf my-public-key')
- hp.register_uri(hp.GET,
- uh.combine_url(base_url, 'public-keys/1/openssh-key'),
- status=200, body='ssh-rsa AAAA.....wZEf my-other-key')
+ base_url = "http://169.254.169.254/%s/meta-data/" % (self.VERSION)
+ hp.register_uri(
+ hp.GET,
+ base_url,
+ status=200,
+ body="\n".join(["hostname", "instance-id", "public-keys/"]),
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "hostname"),
+ status=200,
+ body="ec2.fake.host.name.com",
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "instance-id"),
+ status=200,
+ body="123",
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "public-keys/"),
+ status=200,
+ body="\n".join(["0=my-public-key", "1=my-other-key"]),
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "public-keys/0/openssh-key"),
+ status=200,
+ body="ssh-rsa AAAA.....wZEf my-public-key",
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "public-keys/1/openssh-key"),
+ status=200,
+ body="ssh-rsa AAAA.....wZEf my-other-key",
+ )
md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1)
- self.assertEqual(md['hostname'], 'ec2.fake.host.name.com')
- self.assertEqual(md['instance-id'], '123')
- self.assertEqual(2, len(md['public-keys']))
+ self.assertEqual(md["hostname"], "ec2.fake.host.name.com")
+ self.assertEqual(md["instance-id"], "123")
+ self.assertEqual(2, len(md["public-keys"]))
def test_metadata_fetch_bdm(self):
- base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION)
- hp.register_uri(hp.GET, base_url, status=200,
- body="\n".join(['hostname',
- 'instance-id',
- 'block-device-mapping/']))
- hp.register_uri(hp.GET, uh.combine_url(base_url, 'hostname'),
- status=200, body='ec2.fake.host.name.com')
- hp.register_uri(hp.GET, uh.combine_url(base_url, 'instance-id'),
- status=200, body='123')
- hp.register_uri(hp.GET,
- uh.combine_url(base_url, 'block-device-mapping/'),
- status=200,
- body="\n".join(['ami', 'ephemeral0']))
- hp.register_uri(hp.GET,
- uh.combine_url(base_url, 'block-device-mapping/ami'),
- status=200,
- body="sdb")
- hp.register_uri(hp.GET,
- uh.combine_url(base_url,
- 'block-device-mapping/ephemeral0'),
- status=200,
- body="sdc")
+ base_url = "http://169.254.169.254/%s/meta-data/" % (self.VERSION)
+ hp.register_uri(
+ hp.GET,
+ base_url,
+ status=200,
+ body="\n".join(
+ ["hostname", "instance-id", "block-device-mapping/"]
+ ),
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "hostname"),
+ status=200,
+ body="ec2.fake.host.name.com",
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "instance-id"),
+ status=200,
+ body="123",
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "block-device-mapping/"),
+ status=200,
+ body="\n".join(["ami", "ephemeral0"]),
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "block-device-mapping/ami"),
+ status=200,
+ body="sdb",
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "block-device-mapping/ephemeral0"),
+ status=200,
+ body="sdc",
+ )
md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1)
- self.assertEqual(md['hostname'], 'ec2.fake.host.name.com')
- self.assertEqual(md['instance-id'], '123')
- bdm = md['block-device-mapping']
+ self.assertEqual(md["hostname"], "ec2.fake.host.name.com")
+ self.assertEqual(md["instance-id"], "123")
+ bdm = md["block-device-mapping"]
self.assertEqual(2, len(bdm))
- self.assertEqual(bdm['ami'], 'sdb')
- self.assertEqual(bdm['ephemeral0'], 'sdc')
+ self.assertEqual(bdm["ami"], "sdb")
+ self.assertEqual(bdm["ephemeral0"], "sdc")
def test_metadata_no_security_credentials(self):
- base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION)
- hp.register_uri(hp.GET, base_url, status=200,
- body="\n".join(['instance-id',
- 'iam/']))
- hp.register_uri(hp.GET, uh.combine_url(base_url, 'instance-id'),
- status=200, body='i-0123451689abcdef0')
- hp.register_uri(hp.GET,
- uh.combine_url(base_url, 'iam/'),
- status=200,
- body="\n".join(['info/', 'security-credentials/']))
- hp.register_uri(hp.GET,
- uh.combine_url(base_url, 'iam/info/'),
- status=200,
- body='LastUpdated')
- hp.register_uri(hp.GET,
- uh.combine_url(base_url, 'iam/info/LastUpdated'),
- status=200, body='2016-10-27T17:29:39Z')
- hp.register_uri(hp.GET,
- uh.combine_url(base_url, 'iam/security-credentials/'),
- status=200,
- body='ReadOnly/')
- hp.register_uri(hp.GET,
- uh.combine_url(base_url,
- 'iam/security-credentials/ReadOnly/'),
- status=200,
- body="\n".join(['LastUpdated', 'Expiration']))
- hp.register_uri(hp.GET,
- uh.combine_url(
- base_url,
- 'iam/security-credentials/ReadOnly/LastUpdated'),
- status=200, body='2016-10-27T17:28:17Z')
- hp.register_uri(hp.GET,
- uh.combine_url(
- base_url,
- 'iam/security-credentials/ReadOnly/Expiration'),
- status=200, body='2016-10-28T00:00:34Z')
+ base_url = "http://169.254.169.254/%s/meta-data/" % (self.VERSION)
+ hp.register_uri(
+ hp.GET,
+ base_url,
+ status=200,
+ body="\n".join(["instance-id", "iam/"]),
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "instance-id"),
+ status=200,
+ body="i-0123451689abcdef0",
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "iam/"),
+ status=200,
+ body="\n".join(["info/", "security-credentials/"]),
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "iam/info/"),
+ status=200,
+ body="LastUpdated",
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "iam/info/LastUpdated"),
+ status=200,
+ body="2016-10-27T17:29:39Z",
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "iam/security-credentials/"),
+ status=200,
+ body="ReadOnly/",
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(base_url, "iam/security-credentials/ReadOnly/"),
+ status=200,
+ body="\n".join(["LastUpdated", "Expiration"]),
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(
+ base_url, "iam/security-credentials/ReadOnly/LastUpdated"
+ ),
+ status=200,
+ body="2016-10-27T17:28:17Z",
+ )
+ hp.register_uri(
+ hp.GET,
+ uh.combine_url(
+ base_url, "iam/security-credentials/ReadOnly/Expiration"
+ ),
+ status=200,
+ body="2016-10-28T00:00:34Z",
+ )
md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1)
- self.assertEqual(md['instance-id'], 'i-0123451689abcdef0')
- iam = md['iam']
+ self.assertEqual(md["instance-id"], "i-0123451689abcdef0")
+ iam = md["iam"]
self.assertEqual(1, len(iam))
- self.assertEqual(iam['info']['LastUpdated'], '2016-10-27T17:29:39Z')
- self.assertNotIn('security-credentials', iam)
+ self.assertEqual(iam["info"]["LastUpdated"], "2016-10-27T17:29:39Z")
+ self.assertNotIn("security-credentials", iam)
+
# vi: ts=4 expandtab
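
A note on the pattern in the reformatted registrations above: every leaf of the simulated EC2 metadata tree gets its own httpretty URI, the index body lists the keys (a trailing "/" marks a sub-tree), and cloudinit.ec2_utils.get_instance_metadata() crawls it from the top. A minimal standalone sketch of the same idea, assuming httpretty is installed and enabled, and using a made-up hostname value that is not taken from this diff:

    import httpretty

    from cloudinit import ec2_utils, url_helper

    VERSION = "latest"
    base_url = "http://169.254.169.254/%s/meta-data/" % VERSION

    httpretty.enable()
    # Index listing: plain keys are leaves, keys ending in "/" are sub-trees.
    httpretty.register_uri(httpretty.GET, base_url, status=200, body="hostname")
    # Each leaf is served at its own URL, with the value as the response body.
    httpretty.register_uri(
        httpretty.GET,
        url_helper.combine_url(base_url, "hostname"),
        status=200,
        body="example.host.invalid",  # hypothetical value, not from this diff
    )

    md = ec2_utils.get_instance_metadata(VERSION, retries=0, timeout=0.1)
    print(md["hostname"])  # -> example.host.invalid
    httpretty.disable()
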
diff --git a/tests/unittests/test_event.py b/tests/unittests/test_event.py
new file mode 100644
index 00000000..2ea91bb2
--- /dev/null
+++ b/tests/unittests/test_event.py
@@ -0,0 +1,26 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+"""Tests related to cloudinit.event module."""
+from cloudinit.event import EventScope, EventType, userdata_to_events
+
+
+class TestEvent:
+ def test_userdata_to_events(self):
+ userdata = {"network": {"when": ["boot"]}}
+ expected = {EventScope.NETWORK: {EventType.BOOT}}
+ assert expected == userdata_to_events(userdata)
+
+ def test_invalid_scope(self, caplog):
+ userdata = {"networkasdfasdf": {"when": ["boot"]}}
+ userdata_to_events(userdata)
+ assert (
+ "'networkasdfasdf' is not a valid EventScope! Update data "
+ "will be ignored for 'networkasdfasdf' scope" in caplog.text
+ )
+
+ def test_invalid_event(self, caplog):
+ userdata = {"network": {"when": ["bootasdfasdf"]}}
+ userdata_to_events(userdata)
+ assert (
+ "'bootasdfasdf' is not a valid EventType! Update data "
+ "will be ignored for 'network' scope" in caplog.text
+ )
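
The new test_event.py pins down cloudinit.event.userdata_to_events(), which translates the user-data update-events syntax into internal scopes and event types and drops (with a log message) anything it does not recognise. A minimal sketch of the mapping asserted above, using only values taken from these tests:

    from cloudinit.event import EventScope, EventType, userdata_to_events

    # "network" -> EventScope.NETWORK, "boot" -> EventType.BOOT;
    # unknown scope or event names are logged and ignored.
    assert userdata_to_events({"network": {"when": ["boot"]}}) == {
        EventScope.NETWORK: {EventType.BOOT}
    }
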
diff --git a/tests/unittests/test_features.py b/tests/unittests/test_features.py
new file mode 100644
index 00000000..794a9654
--- /dev/null
+++ b/tests/unittests/test_features.py
@@ -0,0 +1,68 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+# pylint: disable=no-member,no-name-in-module
+"""
+This file is for testing the feature flag functionality itself,
+NOT for testing any individual feature flag
+"""
+import sys
+from pathlib import Path
+
+import pytest
+
+import cloudinit
+
+
+@pytest.fixture()
+def create_override(request):
+ """
+ Create a feature overrides file and do some module wizardry to make
+ it seem like we're importing the features file for the first time.
+
+ After creating the override file with the values passed by the test,
+ we need to reload cloudinit.features
+ to get all of the current features (including the overridden ones).
+ Once the test is complete, we remove the file we created and set
+ features and feature_overrides modules to how they were before
+ the test started
+ """
+ override_path = Path(cloudinit.__file__).parent / "feature_overrides.py"
+ if override_path.exists():
+ raise Exception(
+ "feature_overrides.py unexpectedly exists! "
+ "Remove it to run this test."
+ )
+ with override_path.open("w") as f:
+ for key, value in request.param.items():
+ f.write("{} = {}\n".format(key, value))
+
+ sys.modules.pop("cloudinit.features", None)
+
+ yield
+
+ override_path.unlink()
+ sys.modules.pop("cloudinit.feature_overrides", None)
+
+
+class TestFeatures:
+ def test_feature_without_override(self):
+ from cloudinit.features import ERROR_ON_USER_DATA_FAILURE
+
+ assert ERROR_ON_USER_DATA_FAILURE is True
+
+ @pytest.mark.parametrize(
+ "create_override",
+ [{"ERROR_ON_USER_DATA_FAILURE": False}],
+ indirect=True,
+ )
+ def test_feature_with_override(self, create_override):
+ from cloudinit.features import ERROR_ON_USER_DATA_FAILURE
+
+ assert ERROR_ON_USER_DATA_FAILURE is False
+
+ @pytest.mark.parametrize(
+ "create_override", [{"SPAM": True}], indirect=True
+ )
+ def test_feature_only_in_override(self, create_override):
+ from cloudinit.features import SPAM
+
+ assert SPAM is True
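
The create_override fixture relies on cloudinit.features re-importing any feature_overrides.py sibling module when it is itself (re)imported, which is why the fixture pops cloudinit.features from sys.modules before each parametrised case. A rough standalone sketch of the same mechanism outside of pytest, keeping SPAM as the placeholder flag from the test:

    import importlib
    from pathlib import Path

    import cloudinit

    # Write an override module next to cloudinit/features.py.
    override = Path(cloudinit.__file__).parent / "feature_overrides.py"
    override.write_text("SPAM = True\n")

    importlib.invalidate_caches()
    import cloudinit.features
    importlib.reload(cloudinit.features)  # re-runs the override import
    print(cloudinit.features.SPAM)        # -> True

    override.unlink()  # clean up so later imports are unaffected
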
diff --git a/tests/unittests/test_gpg.py b/tests/unittests/test_gpg.py
new file mode 100644
index 00000000..c3772e3f
--- /dev/null
+++ b/tests/unittests/test_gpg.py
@@ -0,0 +1,139 @@
+from unittest import mock
+
+import pytest
+
+from cloudinit import gpg, subp
+from tests.unittests.helpers import CiTestCase
+
+TEST_KEY_HUMAN = """
+/etc/apt/cloud-init.gpg.d/my_key.gpg
+--------------------------------------------
+pub rsa4096 2021-10-22 [SC]
+ 3A3E F34D FDED B3B7 F3FD F603 F83F 7712 9A5E BD85
+uid [ unknown] Brett Holman <brett.holman@canonical.com>
+sub rsa4096 2021-10-22 [A]
+sub rsa4096 2021-10-22 [E]
+"""
+
+TEST_KEY_MACHINE = """
+tru::1:1635129362:0:3:1:5
+pub:-:4096:1:F83F77129A5EBD85:1634912922:::-:::scESCA::::::23::0:
+fpr:::::::::3A3EF34DFDEDB3B7F3FDF603F83F77129A5EBD85:
+uid:-::::1634912922::64F1F1D6FA96316752D635D7C6406C52C40713C7::Brett Holman \
+<brett.holman@canonical.com>::::::::::0:
+sub:-:4096:1:544B39C9A9141F04:1634912922::::::a::::::23:
+fpr:::::::::8BD901490D6EC986D03D6F0D544B39C9A9141F04:
+sub:-:4096:1:F45D9443F0A87092:1634912922::::::e::::::23:
+fpr:::::::::8CCCB332317324F030A45B19F45D9443F0A87092:
+"""
+
+TEST_KEY_FINGERPRINT_HUMAN = (
+ "3A3E F34D FDED B3B7 F3FD F603 F83F 7712 9A5E BD85"
+)
+
+TEST_KEY_FINGERPRINT_MACHINE = "3A3EF34DFDEDB3B7F3FDF603F83F77129A5EBD85"
+
+
+class TestGPGCommands:
+ def test_dearmor_bad_value(self):
+ """This exception is handled by the callee. Ensure it is not caught
+ internally.
+ """
+ with mock.patch.object(
+ subp, "subp", side_effect=subp.ProcessExecutionError
+ ):
+ with pytest.raises(subp.ProcessExecutionError):
+ gpg.dearmor("garbage key value")
+
+ def test_gpg_list_args(self):
+ """Verify correct command gets called to list keys"""
+ no_colons = [
+ "gpg",
+ "--with-fingerprint",
+ "--no-default-keyring",
+ "--list-keys",
+ "--keyring",
+ "key",
+ ]
+ colons = [
+ "gpg",
+ "--with-fingerprint",
+ "--no-default-keyring",
+ "--list-keys",
+ "--keyring",
+ "--with-colons",
+ "key",
+ ]
+ with mock.patch.object(subp, "subp", return_value=("", "")) as m_subp:
+ gpg.list("key")
+ assert mock.call(colons, capture=True) == m_subp.call_args
+
+ gpg.list("key", human_output=True)
+ test_calls = mock.call((no_colons), capture=True)
+ assert test_calls == m_subp.call_args
+
+ def test_gpg_dearmor_args(self):
+ """Verify correct command gets called to dearmor keys"""
+ with mock.patch.object(subp, "subp", return_value=("", "")) as m_subp:
+ gpg.dearmor("key")
+ test_call = mock.call(
+ ["gpg", "--dearmor"], data="key", decode=False
+ )
+ assert test_call == m_subp.call_args
+
+ @mock.patch("cloudinit.gpg.time.sleep")
+ @mock.patch("cloudinit.gpg.subp.subp")
+ class TestReceiveKeys(CiTestCase):
+ """Test the recv_key method."""
+
+ def test_retries_on_subp_exc(self, m_subp, m_sleep):
+ """retry should be done on gpg receive keys failure."""
+ retries = (1, 2, 4)
+ my_exc = subp.ProcessExecutionError(
+ stdout="", stderr="", exit_code=2, cmd=["mycmd"]
+ )
+ m_subp.side_effect = (my_exc, my_exc, ("", ""))
+ gpg.recv_key("ABCD", "keyserver.example.com", retries=retries)
+ self.assertEqual(
+ [mock.call(1), mock.call(2)], m_sleep.call_args_list
+ )
+
+ def test_raises_error_after_retries(self, m_subp, m_sleep):
+ """If the final run fails, error should be raised."""
+ naplen = 1
+ keyid, keyserver = ("ABCD", "keyserver.example.com")
+ m_subp.side_effect = subp.ProcessExecutionError(
+ stdout="", stderr="", exit_code=2, cmd=["mycmd"]
+ )
+ with self.assertRaises(ValueError) as rcm:
+ gpg.recv_key(keyid, keyserver, retries=(naplen,))
+ self.assertIn(keyid, str(rcm.exception))
+ self.assertIn(keyserver, str(rcm.exception))
+ m_sleep.assert_called_with(naplen)
+
+ def test_no_retries_on_none(self, m_subp, m_sleep):
+ """retry should not be done if retries is None."""
+ m_subp.side_effect = subp.ProcessExecutionError(
+ stdout="", stderr="", exit_code=2, cmd=["mycmd"]
+ )
+ with self.assertRaises(ValueError):
+ gpg.recv_key("ABCD", "keyserver.example.com", retries=None)
+ m_sleep.assert_not_called()
+
+ def test_expected_gpg_command(self, m_subp, m_sleep):
+ """Verify gpg is called with expected args."""
+ key, keyserver = ("DEADBEEF", "keyserver.example.com")
+ retries = (1, 2, 4)
+ m_subp.return_value = ("", "")
+ gpg.recv_key(key, keyserver, retries=retries)
+ m_subp.assert_called_once_with(
+ [
+ "gpg",
+ "--no-tty",
+ "--keyserver=%s" % keyserver,
+ "--recv-keys",
+ key,
+ ],
+ capture=True,
+ )
+ m_sleep.assert_not_called()
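
The TestReceiveKeys cases spell out the retry contract of gpg.recv_key(): each entry in the retries tuple is a sleep interval between attempts, and a ValueError naming the key and keyserver is raised only after the last attempt fails. A hedged usage sketch, reusing the placeholder key id and keyserver from the tests above:

    from cloudinit import gpg

    try:
        # Runs: gpg --no-tty --keyserver=keyserver.example.com --recv-keys DEADBEEF
        # sleeping 1s, then 2s, then 4s between failed attempts.
        gpg.recv_key("DEADBEEF", "keyserver.example.com", retries=(1, 2, 4))
    except ValueError as exc:
        print("could not fetch key: %s" % exc)
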
diff --git a/tests/unittests/test_handler/test_handler_apk_configure.py b/tests/unittests/test_handler/test_handler_apk_configure.py
deleted file mode 100644
index 8acc0b33..00000000
--- a/tests/unittests/test_handler/test_handler_apk_configure.py
+++ /dev/null
@@ -1,299 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-""" test_apk_configure
-Test creation of repositories file
-"""
-
-import logging
-import os
-import textwrap
-
-from cloudinit import (cloud, helpers, util)
-
-from cloudinit.config import cc_apk_configure
-from cloudinit.tests.helpers import (FilesystemMockingTestCase, mock)
-
-REPO_FILE = "/etc/apk/repositories"
-DEFAULT_MIRROR_URL = "https://alpine.global.ssl.fastly.net/alpine"
-CC_APK = 'cloudinit.config.cc_apk_configure'
-
-
-class TestNoConfig(FilesystemMockingTestCase):
- def setUp(self):
- super(TestNoConfig, self).setUp()
- self.add_patch(CC_APK + '._write_repositories_file', 'm_write_repos')
- self.name = "apk-configure"
- self.cloud_init = None
- self.log = logging.getLogger("TestNoConfig")
- self.args = []
-
- def test_no_config(self):
- """
- Test that nothing is done if no apk-configure
- configuration is provided.
- """
- config = util.get_builtin_cfg()
-
- cc_apk_configure.handle(self.name, config, self.cloud_init,
- self.log, self.args)
-
- self.assertEqual(0, self.m_write_repos.call_count)
-
-
-class TestConfig(FilesystemMockingTestCase):
- def setUp(self):
- super(TestConfig, self).setUp()
- self.new_root = self.tmp_dir()
- self.new_root = self.reRoot(root=self.new_root)
- for dirname in ['tmp', 'etc/apk']:
- util.ensure_dir(os.path.join(self.new_root, dirname))
- self.paths = helpers.Paths({'templates_dir': self.new_root})
- self.name = "apk-configure"
- self.cloud = cloud.Cloud(None, self.paths, None, None, None)
- self.log = logging.getLogger("TestNoConfig")
- self.args = []
-
- @mock.patch(CC_APK + '._write_repositories_file')
- def test_no_repo_settings(self, m_write_repos):
- """
- Test that nothing is written if the 'alpine_repo' key
- is not present.
- """
- config = {"apk_repos": {}}
-
- cc_apk_configure.handle(self.name, config, self.cloud, self.log,
- self.args)
-
- self.assertEqual(0, m_write_repos.call_count)
-
- @mock.patch(CC_APK + '._write_repositories_file')
- def test_empty_repo_settings(self, m_write_repos):
- """
- Test that nothing is written if 'alpine_repo' list is empty.
- """
- config = {"apk_repos": {"alpine_repo": []}}
-
- cc_apk_configure.handle(self.name, config, self.cloud, self.log,
- self.args)
-
- self.assertEqual(0, m_write_repos.call_count)
-
- def test_only_main_repo(self):
- """
- Test when only details of the main repo are written to file.
- """
- alpine_version = 'v3.12'
- config = {
- "apk_repos": {
- "alpine_repo": {
- "version": alpine_version
- }
- }
- }
-
- cc_apk_configure.handle(self.name, config, self.cloud, self.log,
- self.args)
-
- expected_content = textwrap.dedent("""\
- #
- # Created by cloud-init
- #
- # This file is written on first boot of an instance
- #
-
- {0}/{1}/main
-
- """.format(DEFAULT_MIRROR_URL, alpine_version))
-
- self.assertEqual(expected_content, util.load_file(REPO_FILE))
-
- def test_main_and_community_repos(self):
- """
- Test when only details of main and community repos are
- written to file.
- """
- alpine_version = 'edge'
- config = {
- "apk_repos": {
- "alpine_repo": {
- "version": alpine_version,
- "community_enabled": True
- }
- }
- }
-
- cc_apk_configure.handle(self.name, config, self.cloud, self.log,
- self.args)
-
- expected_content = textwrap.dedent("""\
- #
- # Created by cloud-init
- #
- # This file is written on first boot of an instance
- #
-
- {0}/{1}/main
- {0}/{1}/community
-
- """.format(DEFAULT_MIRROR_URL, alpine_version))
-
- self.assertEqual(expected_content, util.load_file(REPO_FILE))
-
- def test_main_community_testing_repos(self):
- """
- Test when details of main, community and testing repos
- are written to file.
- """
- alpine_version = 'v3.12'
- config = {
- "apk_repos": {
- "alpine_repo": {
- "version": alpine_version,
- "community_enabled": True,
- "testing_enabled": True
- }
- }
- }
-
- cc_apk_configure.handle(self.name, config, self.cloud, self.log,
- self.args)
-
- expected_content = textwrap.dedent("""\
- #
- # Created by cloud-init
- #
- # This file is written on first boot of an instance
- #
-
- {0}/{1}/main
- {0}/{1}/community
- #
- # Testing - using with non-Edge installation may cause problems!
- #
- {0}/edge/testing
-
- """.format(DEFAULT_MIRROR_URL, alpine_version))
-
- self.assertEqual(expected_content, util.load_file(REPO_FILE))
-
- def test_edge_main_community_testing_repos(self):
- """
- Test when details of main, community and testing repos
- for Edge version of Alpine are written to file.
- """
- alpine_version = 'edge'
- config = {
- "apk_repos": {
- "alpine_repo": {
- "version": alpine_version,
- "community_enabled": True,
- "testing_enabled": True
- }
- }
- }
-
- cc_apk_configure.handle(self.name, config, self.cloud, self.log,
- self.args)
-
- expected_content = textwrap.dedent("""\
- #
- # Created by cloud-init
- #
- # This file is written on first boot of an instance
- #
-
- {0}/{1}/main
- {0}/{1}/community
- {0}/{1}/testing
-
- """.format(DEFAULT_MIRROR_URL, alpine_version))
-
- self.assertEqual(expected_content, util.load_file(REPO_FILE))
-
- def test_main_community_testing_local_repos(self):
- """
- Test when details of main, community, testing and
- local repos are written to file.
- """
- alpine_version = 'v3.12'
- local_repo_url = 'http://some.mirror/whereever'
- config = {
- "apk_repos": {
- "alpine_repo": {
- "version": alpine_version,
- "community_enabled": True,
- "testing_enabled": True
- },
- "local_repo_base_url": local_repo_url
- }
- }
-
- cc_apk_configure.handle(self.name, config, self.cloud, self.log,
- self.args)
-
- expected_content = textwrap.dedent("""\
- #
- # Created by cloud-init
- #
- # This file is written on first boot of an instance
- #
-
- {0}/{1}/main
- {0}/{1}/community
- #
- # Testing - using with non-Edge installation may cause problems!
- #
- {0}/edge/testing
-
- #
- # Local repo
- #
- {2}/{1}
-
- """.format(DEFAULT_MIRROR_URL, alpine_version, local_repo_url))
-
- self.assertEqual(expected_content, util.load_file(REPO_FILE))
-
- def test_edge_main_community_testing_local_repos(self):
- """
- Test when details of main, community, testing and local repos
- for Edge version of Alpine are written to file.
- """
- alpine_version = 'edge'
- local_repo_url = 'http://some.mirror/whereever'
- config = {
- "apk_repos": {
- "alpine_repo": {
- "version": alpine_version,
- "community_enabled": True,
- "testing_enabled": True
- },
- "local_repo_base_url": local_repo_url
- }
- }
-
- cc_apk_configure.handle(self.name, config, self.cloud, self.log,
- self.args)
-
- expected_content = textwrap.dedent("""\
- #
- # Created by cloud-init
- #
- # This file is written on first boot of an instance
- #
-
- {0}/{1}/main
- {0}/{1}/community
- {0}/edge/testing
-
- #
- # Local repo
- #
- {2}/{1}
-
- """.format(DEFAULT_MIRROR_URL, alpine_version, local_repo_url))
-
- self.assertEqual(expected_content, util.load_file(REPO_FILE))
-
-
-# vi: ts=4 expandtab
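
All of the removed apk cases share one shape: build an apk_repos dict, run cc_apk_configure.handle(), and compare /etc/apk/repositories against a template filled in from DEFAULT_MIRROR_URL. A compact sketch of just the data relationship from test_main_and_community_repos, without the rerooted filesystem the test class sets up:

    import textwrap

    DEFAULT_MIRROR_URL = "https://alpine.global.ssl.fastly.net/alpine"
    alpine_version = "edge"

    config = {
        "apk_repos": {
            "alpine_repo": {"version": alpine_version, "community_enabled": True}
        }
    }

    # Content the deleted test expects cc_apk_configure.handle() to write
    # to /etc/apk/repositories for the config above.
    expected = textwrap.dedent("""\
        #
        # Created by cloud-init
        #
        # This file is written on first boot of an instance
        #

        {0}/{1}/main
        {0}/{1}/community

        """.format(DEFAULT_MIRROR_URL, alpine_version))
    print(expected)
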
diff --git a/tests/unittests/test_handler/test_handler_apt_source_v1.py b/tests/unittests/test_handler/test_handler_apt_source_v1.py
deleted file mode 100644
index 367971cb..00000000
--- a/tests/unittests/test_handler/test_handler_apt_source_v1.py
+++ /dev/null
@@ -1,626 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-""" test_handler_apt_source_v1
-Testing various config variations of the apt_source config
-This calls all things with v1 format to stress the conversion code on top of
-the actually tested code.
-"""
-import os
-import re
-import shutil
-import tempfile
-from unittest import mock
-from unittest.mock import call
-
-from cloudinit.config import cc_apt_configure
-from cloudinit import gpg
-from cloudinit import subp
-from cloudinit import util
-
-from cloudinit.tests.helpers import TestCase
-
-EXPECTEDKEY = """-----BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v1
-
-mI0ESuZLUgEEAKkqq3idtFP7g9hzOu1a8+v8ImawQN4TrvlygfScMU1TIS1eC7UQ
-NUA8Qqgr9iUaGnejb0VciqftLrU9D6WYHSKz+EITefgdyJ6SoQxjoJdsCpJ7o9Jy
-8PQnpRttiFm4qHu6BVnKnBNxw/z3ST9YMqW5kbMQpfxbGe+obRox59NpABEBAAG0
-HUxhdW5jaHBhZCBQUEEgZm9yIFNjb3R0IE1vc2VyiLYEEwECACAFAkrmS1ICGwMG
-CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRAGILvPA2g/d3aEA/9tVjc10HOZwV29
-OatVuTeERjjrIbxflO586GLA8cp0C9RQCwgod/R+cKYdQcHjbqVcP0HqxveLg0RZ
-FJpWLmWKamwkABErwQLGlM/Hwhjfade8VvEQutH5/0JgKHmzRsoqfR+LMO6OS+Sm
-S0ORP6HXET3+jC8BMG4tBWCTK/XEZw==
-=ACB2
------END PGP PUBLIC KEY BLOCK-----"""
-
-ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
-
-
-class FakeDistro(object):
- """Fake Distro helper object"""
- def update_package_sources(self):
- """Fake update_package_sources helper method"""
- return
-
-
-class FakeDatasource:
- """Fake Datasource helper object"""
- def __init__(self):
- self.region = 'region'
-
-
-class FakeCloud(object):
- """Fake Cloud helper object"""
- def __init__(self):
- self.distro = FakeDistro()
- self.datasource = FakeDatasource()
-
-
-class TestAptSourceConfig(TestCase):
- """TestAptSourceConfig
- Main Class to test apt_source configs
- """
- release = "fantastic"
-
- def setUp(self):
- super(TestAptSourceConfig, self).setUp()
- self.tmp = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.tmp)
- self.aptlistfile = os.path.join(self.tmp, "single-deb.list")
- self.aptlistfile2 = os.path.join(self.tmp, "single-deb2.list")
- self.aptlistfile3 = os.path.join(self.tmp, "single-deb3.list")
- self.join = os.path.join
- self.matcher = re.compile(ADD_APT_REPO_MATCH).search
- # mock fallback filename into writable tmp dir
- self.fallbackfn = os.path.join(self.tmp, "etc/apt/sources.list.d/",
- "cloud_config_sources.list")
-
- self.fakecloud = FakeCloud()
-
- rpatcher = mock.patch("cloudinit.util.lsb_release")
- get_rel = rpatcher.start()
- get_rel.return_value = {'codename': self.release}
- self.addCleanup(rpatcher.stop)
- apatcher = mock.patch("cloudinit.util.get_dpkg_architecture")
- get_arch = apatcher.start()
- get_arch.return_value = 'amd64'
- self.addCleanup(apatcher.stop)
-
- def _get_default_params(self):
- """get_default_params
- Get the most basic default mirror and release info to be used in tests
- """
- params = {}
- params['RELEASE'] = self.release
- params['MIRROR'] = "http://archive.ubuntu.com/ubuntu"
- return params
-
- def wrapv1conf(self, cfg):
- params = self._get_default_params()
- # old v1 list format under old keys, but callable to main handler
- # disable source.list rendering and set mirror to avoid other code
- return {'apt_preserve_sources_list': True,
- 'apt_mirror': params['MIRROR'],
- 'apt_sources': cfg}
-
- def myjoin(self, *args, **kwargs):
- """myjoin - redir into writable tmpdir"""
- if (args[0] == "/etc/apt/sources.list.d/" and
- args[1] == "cloud_config_sources.list" and
- len(args) == 2):
- return self.join(self.tmp, args[0].lstrip("/"), args[1])
- else:
- return self.join(*args, **kwargs)
-
- def apt_src_basic(self, filename, cfg):
- """apt_src_basic
- Test Fix deb source string, has to overwrite mirror conf in params
- """
- cfg = self.wrapv1conf(cfg)
-
- cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
-
- self.assertTrue(os.path.isfile(filename))
-
- contents = util.load_file(filename)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb", "http://archive.ubuntu.com/ubuntu",
- "karmic-backports",
- "main universe multiverse restricted"),
- contents, flags=re.IGNORECASE))
-
- def test_apt_src_basic(self):
- """Test deb source string, overwrite mirror and filename"""
- cfg = {'source': ('deb http://archive.ubuntu.com/ubuntu'
- ' karmic-backports'
- ' main universe multiverse restricted'),
- 'filename': self.aptlistfile}
- self.apt_src_basic(self.aptlistfile, [cfg])
-
- def test_apt_src_basic_dict(self):
- """Test deb source string, overwrite mirror and filename (dict)"""
- cfg = {self.aptlistfile: {'source':
- ('deb http://archive.ubuntu.com/ubuntu'
- ' karmic-backports'
- ' main universe multiverse restricted')}}
- self.apt_src_basic(self.aptlistfile, cfg)
-
- def apt_src_basic_tri(self, cfg):
- """apt_src_basic_tri
- Test Fix three deb source string, has to overwrite mirror conf in
- params. Test with filenames provided in config.
- generic part to check three files with different content
- """
- self.apt_src_basic(self.aptlistfile, cfg)
-
- # extra verify on two extra files of this test
- contents = util.load_file(self.aptlistfile2)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb", "http://archive.ubuntu.com/ubuntu",
- "precise-backports",
- "main universe multiverse restricted"),
- contents, flags=re.IGNORECASE))
- contents = util.load_file(self.aptlistfile3)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb", "http://archive.ubuntu.com/ubuntu",
- "lucid-backports",
- "main universe multiverse restricted"),
- contents, flags=re.IGNORECASE))
-
- def test_apt_src_basic_tri(self):
- """Test Fix three deb source string with filenames"""
- cfg1 = {'source': ('deb http://archive.ubuntu.com/ubuntu'
- ' karmic-backports'
- ' main universe multiverse restricted'),
- 'filename': self.aptlistfile}
- cfg2 = {'source': ('deb http://archive.ubuntu.com/ubuntu'
- ' precise-backports'
- ' main universe multiverse restricted'),
- 'filename': self.aptlistfile2}
- cfg3 = {'source': ('deb http://archive.ubuntu.com/ubuntu'
- ' lucid-backports'
- ' main universe multiverse restricted'),
- 'filename': self.aptlistfile3}
- self.apt_src_basic_tri([cfg1, cfg2, cfg3])
-
- def test_apt_src_basic_dict_tri(self):
- """Test Fix three deb source string with filenames (dict)"""
- cfg = {self.aptlistfile: {'source':
- ('deb http://archive.ubuntu.com/ubuntu'
- ' karmic-backports'
- ' main universe multiverse restricted')},
- self.aptlistfile2: {'source':
- ('deb http://archive.ubuntu.com/ubuntu'
- ' precise-backports'
- ' main universe multiverse restricted')},
- self.aptlistfile3: {'source':
- ('deb http://archive.ubuntu.com/ubuntu'
- ' lucid-backports'
- ' main universe multiverse restricted')}}
- self.apt_src_basic_tri(cfg)
-
- def test_apt_src_basic_nofn(self):
- """Test Fix three deb source string without filenames (dict)"""
- cfg = {'source': ('deb http://archive.ubuntu.com/ubuntu'
- ' karmic-backports'
- ' main universe multiverse restricted')}
- with mock.patch.object(os.path, 'join', side_effect=self.myjoin):
- self.apt_src_basic(self.fallbackfn, [cfg])
-
- def apt_src_replacement(self, filename, cfg):
- """apt_src_replace
- Test Autoreplacement of MIRROR and RELEASE in source specs
- """
- cfg = self.wrapv1conf(cfg)
- params = self._get_default_params()
- cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
-
- self.assertTrue(os.path.isfile(filename))
-
- contents = util.load_file(filename)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb", params['MIRROR'], params['RELEASE'],
- "multiverse"),
- contents, flags=re.IGNORECASE))
-
- def test_apt_src_replace(self):
- """Test Autoreplacement of MIRROR and RELEASE in source specs"""
- cfg = {'source': 'deb $MIRROR $RELEASE multiverse',
- 'filename': self.aptlistfile}
- self.apt_src_replacement(self.aptlistfile, [cfg])
-
- def apt_src_replace_tri(self, cfg):
- """apt_src_replace_tri
- Test three autoreplacements of MIRROR and RELEASE in source specs with
- generic part
- """
- self.apt_src_replacement(self.aptlistfile, cfg)
-
- # extra verify on two extra files of this test
- params = self._get_default_params()
- contents = util.load_file(self.aptlistfile2)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb", params['MIRROR'], params['RELEASE'],
- "main"),
- contents, flags=re.IGNORECASE))
- contents = util.load_file(self.aptlistfile3)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb", params['MIRROR'], params['RELEASE'],
- "universe"),
- contents, flags=re.IGNORECASE))
-
- def test_apt_src_replace_tri(self):
- """Test triple Autoreplacement of MIRROR and RELEASE in source specs"""
- cfg1 = {'source': 'deb $MIRROR $RELEASE multiverse',
- 'filename': self.aptlistfile}
- cfg2 = {'source': 'deb $MIRROR $RELEASE main',
- 'filename': self.aptlistfile2}
- cfg3 = {'source': 'deb $MIRROR $RELEASE universe',
- 'filename': self.aptlistfile3}
- self.apt_src_replace_tri([cfg1, cfg2, cfg3])
-
- def test_apt_src_replace_dict_tri(self):
- """Test triple Autoreplacement in source specs (dict)"""
- cfg = {self.aptlistfile: {'source': 'deb $MIRROR $RELEASE multiverse'},
- 'notused': {'source': 'deb $MIRROR $RELEASE main',
- 'filename': self.aptlistfile2},
- self.aptlistfile3: {'source': 'deb $MIRROR $RELEASE universe'}}
- self.apt_src_replace_tri(cfg)
-
- def test_apt_src_replace_nofn(self):
- """Test Autoreplacement of MIRROR and RELEASE in source specs nofile"""
- cfg = {'source': 'deb $MIRROR $RELEASE multiverse'}
- with mock.patch.object(os.path, 'join', side_effect=self.myjoin):
- self.apt_src_replacement(self.fallbackfn, [cfg])
-
- def apt_src_keyid(self, filename, cfg, keynum):
- """apt_src_keyid
- Test specification of a source + keyid
- """
- cfg = self.wrapv1conf(cfg)
-
- with mock.patch.object(subp, 'subp',
- return_value=('fakekey 1234', '')) as mockobj:
- cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
-
- # check if it added the right amount of keys
- calls = []
- for _ in range(keynum):
- calls.append(call(['apt-key', 'add', '-'],
- data=b'fakekey 1234',
- target=None))
- mockobj.assert_has_calls(calls, any_order=True)
-
- self.assertTrue(os.path.isfile(filename))
-
- contents = util.load_file(filename)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb",
- ('http://ppa.launchpad.net/smoser/'
- 'cloud-init-test/ubuntu'),
- "xenial", "main"),
- contents, flags=re.IGNORECASE))
-
- def test_apt_src_keyid(self):
- """Test specification of a source + keyid with filename being set"""
- cfg = {'source': ('deb '
- 'http://ppa.launchpad.net/'
- 'smoser/cloud-init-test/ubuntu'
- ' xenial main'),
- 'keyid': "03683F77",
- 'filename': self.aptlistfile}
- self.apt_src_keyid(self.aptlistfile, [cfg], 1)
-
- def test_apt_src_keyid_tri(self):
- """Test 3x specification of a source + keyid with filename being set"""
- cfg1 = {'source': ('deb '
- 'http://ppa.launchpad.net/'
- 'smoser/cloud-init-test/ubuntu'
- ' xenial main'),
- 'keyid': "03683F77",
- 'filename': self.aptlistfile}
- cfg2 = {'source': ('deb '
- 'http://ppa.launchpad.net/'
- 'smoser/cloud-init-test/ubuntu'
- ' xenial universe'),
- 'keyid': "03683F77",
- 'filename': self.aptlistfile2}
- cfg3 = {'source': ('deb '
- 'http://ppa.launchpad.net/'
- 'smoser/cloud-init-test/ubuntu'
- ' xenial multiverse'),
- 'keyid': "03683F77",
- 'filename': self.aptlistfile3}
-
- self.apt_src_keyid(self.aptlistfile, [cfg1, cfg2, cfg3], 3)
- contents = util.load_file(self.aptlistfile2)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb",
- ('http://ppa.launchpad.net/smoser/'
- 'cloud-init-test/ubuntu'),
- "xenial", "universe"),
- contents, flags=re.IGNORECASE))
- contents = util.load_file(self.aptlistfile3)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb",
- ('http://ppa.launchpad.net/smoser/'
- 'cloud-init-test/ubuntu'),
- "xenial", "multiverse"),
- contents, flags=re.IGNORECASE))
-
- def test_apt_src_keyid_nofn(self):
- """Test specification of a source + keyid without filename being set"""
- cfg = {'source': ('deb '
- 'http://ppa.launchpad.net/'
- 'smoser/cloud-init-test/ubuntu'
- ' xenial main'),
- 'keyid': "03683F77"}
- with mock.patch.object(os.path, 'join', side_effect=self.myjoin):
- self.apt_src_keyid(self.fallbackfn, [cfg], 1)
-
- def apt_src_key(self, filename, cfg):
- """apt_src_key
- Test specification of a source + key
- """
- cfg = self.wrapv1conf([cfg])
-
- with mock.patch.object(subp, 'subp') as mockobj:
- cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
-
- mockobj.assert_called_with(['apt-key', 'add', '-'],
- data=b'fakekey 4321', target=None)
-
- self.assertTrue(os.path.isfile(filename))
-
- contents = util.load_file(filename)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb",
- ('http://ppa.launchpad.net/smoser/'
- 'cloud-init-test/ubuntu'),
- "xenial", "main"),
- contents, flags=re.IGNORECASE))
-
- def test_apt_src_key(self):
- """Test specification of a source + key with filename being set"""
- cfg = {'source': ('deb '
- 'http://ppa.launchpad.net/'
- 'smoser/cloud-init-test/ubuntu'
- ' xenial main'),
- 'key': "fakekey 4321",
- 'filename': self.aptlistfile}
- self.apt_src_key(self.aptlistfile, cfg)
-
- def test_apt_src_key_nofn(self):
- """Test specification of a source + key without filename being set"""
- cfg = {'source': ('deb '
- 'http://ppa.launchpad.net/'
- 'smoser/cloud-init-test/ubuntu'
- ' xenial main'),
- 'key': "fakekey 4321"}
- with mock.patch.object(os.path, 'join', side_effect=self.myjoin):
- self.apt_src_key(self.fallbackfn, cfg)
-
- def test_apt_src_keyonly(self):
- """Test specifying key without source"""
- cfg = {'key': "fakekey 4242",
- 'filename': self.aptlistfile}
- cfg = self.wrapv1conf([cfg])
-
- with mock.patch.object(subp, 'subp') as mockobj:
- cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
-
- mockobj.assert_called_once_with(['apt-key', 'add', '-'],
- data=b'fakekey 4242', target=None)
-
- # filename should be ignored on key only
- self.assertFalse(os.path.isfile(self.aptlistfile))
-
- def test_apt_src_keyidonly(self):
- """Test specification of a keyid without source"""
- cfg = {'keyid': "03683F77",
- 'filename': self.aptlistfile}
- cfg = self.wrapv1conf([cfg])
-
- with mock.patch.object(subp, 'subp',
- return_value=('fakekey 1212', '')) as mockobj:
- cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
-
- mockobj.assert_called_with(['apt-key', 'add', '-'],
- data=b'fakekey 1212', target=None)
-
- # filename should be ignored on key only
- self.assertFalse(os.path.isfile(self.aptlistfile))
-
- def apt_src_keyid_real(self, cfg, expectedkey):
- """apt_src_keyid_real
- Test specification of a keyid without source including
- up to addition of the key (add_apt_key_raw mocked to keep the
- environment as is)
- """
- key = cfg['keyid']
- keyserver = cfg.get('keyserver', 'keyserver.ubuntu.com')
- cfg = self.wrapv1conf([cfg])
-
- with mock.patch.object(cc_apt_configure, 'add_apt_key_raw') as mockkey:
- with mock.patch.object(gpg, 'getkeybyid',
- return_value=expectedkey) as mockgetkey:
- cc_apt_configure.handle("test", cfg, self.fakecloud,
- None, None)
-
- mockgetkey.assert_called_with(key, keyserver)
- mockkey.assert_called_with(expectedkey, None)
-
- # filename should be ignored on key only
- self.assertFalse(os.path.isfile(self.aptlistfile))
-
- def test_apt_src_keyid_real(self):
- """test_apt_src_keyid_real - Test keyid including key add"""
- keyid = "03683F77"
- cfg = {'keyid': keyid,
- 'filename': self.aptlistfile}
-
- self.apt_src_keyid_real(cfg, EXPECTEDKEY)
-
- def test_apt_src_longkeyid_real(self):
- """test_apt_src_longkeyid_real - Test long keyid including key add"""
- keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77"
- cfg = {'keyid': keyid,
- 'filename': self.aptlistfile}
-
- self.apt_src_keyid_real(cfg, EXPECTEDKEY)
-
- def test_apt_src_longkeyid_ks_real(self):
- """test_apt_src_longkeyid_ks_real - Test long keyid from other ks"""
- keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77"
- cfg = {'keyid': keyid,
- 'keyserver': 'keys.gnupg.net',
- 'filename': self.aptlistfile}
-
- self.apt_src_keyid_real(cfg, EXPECTEDKEY)
-
- def test_apt_src_ppa(self):
- """Test adding a ppa"""
- cfg = {'source': 'ppa:smoser/cloud-init-test',
- 'filename': self.aptlistfile}
- cfg = self.wrapv1conf([cfg])
-
- with mock.patch.object(subp, 'subp') as mockobj:
- cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
- mockobj.assert_called_once_with(['add-apt-repository',
- 'ppa:smoser/cloud-init-test'],
- target=None)
-
- # adding ppa should ignore filename (uses add-apt-repository)
- self.assertFalse(os.path.isfile(self.aptlistfile))
-
- def test_apt_src_ppa_tri(self):
- """Test adding three ppa's"""
- cfg1 = {'source': 'ppa:smoser/cloud-init-test',
- 'filename': self.aptlistfile}
- cfg2 = {'source': 'ppa:smoser/cloud-init-test2',
- 'filename': self.aptlistfile2}
- cfg3 = {'source': 'ppa:smoser/cloud-init-test3',
- 'filename': self.aptlistfile3}
- cfg = self.wrapv1conf([cfg1, cfg2, cfg3])
-
- with mock.patch.object(subp, 'subp') as mockobj:
- cc_apt_configure.handle("test", cfg, self.fakecloud,
- None, None)
- calls = [call(['add-apt-repository', 'ppa:smoser/cloud-init-test'],
- target=None),
- call(['add-apt-repository', 'ppa:smoser/cloud-init-test2'],
- target=None),
- call(['add-apt-repository', 'ppa:smoser/cloud-init-test3'],
- target=None)]
- mockobj.assert_has_calls(calls, any_order=True)
-
- # adding ppa should ignore all filenames (uses add-apt-repository)
- self.assertFalse(os.path.isfile(self.aptlistfile))
- self.assertFalse(os.path.isfile(self.aptlistfile2))
- self.assertFalse(os.path.isfile(self.aptlistfile3))
-
- def test_convert_to_new_format(self):
- """Test the conversion of old to new format"""
- cfg1 = {'source': 'deb $MIRROR $RELEASE multiverse',
- 'filename': self.aptlistfile}
- cfg2 = {'source': 'deb $MIRROR $RELEASE main',
- 'filename': self.aptlistfile2}
- cfg3 = {'source': 'deb $MIRROR $RELEASE universe',
- 'filename': self.aptlistfile3}
- cfg = {'apt_sources': [cfg1, cfg2, cfg3]}
- checkcfg = {self.aptlistfile: {'filename': self.aptlistfile,
- 'source': 'deb $MIRROR $RELEASE '
- 'multiverse'},
- self.aptlistfile2: {'filename': self.aptlistfile2,
- 'source': 'deb $MIRROR $RELEASE main'},
- self.aptlistfile3: {'filename': self.aptlistfile3,
- 'source': 'deb $MIRROR $RELEASE '
- 'universe'}}
-
- newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg)
- self.assertEqual(newcfg['apt']['sources'], checkcfg)
-
- # convert again, should stay the same
- newcfg2 = cc_apt_configure.convert_to_v3_apt_format(newcfg)
- self.assertEqual(newcfg2['apt']['sources'], checkcfg)
-
- # should work without raising an exception
- cc_apt_configure.convert_to_v3_apt_format({})
-
- with self.assertRaises(ValueError):
- cc_apt_configure.convert_to_v3_apt_format({'apt_sources': 5})
-
- def test_convert_to_new_format_collision(self):
- """Test the conversion of old to new format with collisions
- That matches e.g. the MAAS case specifying old and new config"""
- cfg_1_and_3 = {'apt': {'proxy': 'http://192.168.122.1:8000/'},
- 'apt_proxy': 'http://192.168.122.1:8000/'}
- cfg_3_only = {'apt': {'proxy': 'http://192.168.122.1:8000/'}}
- cfgconflict = {'apt': {'proxy': 'http://192.168.122.1:8000/'},
- 'apt_proxy': 'ftp://192.168.122.1:8000/'}
-
- # collision (equal)
- newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3)
- self.assertEqual(newcfg, cfg_3_only)
- # collision (equal, so ok to remove)
- newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_3_only)
- self.assertEqual(newcfg, cfg_3_only)
- # collision (unequal)
- match = "Old and New.*unequal.*apt_proxy"
- with self.assertRaisesRegex(ValueError, match):
- cc_apt_configure.convert_to_v3_apt_format(cfgconflict)
-
- def test_convert_to_new_format_dict_collision(self):
- cfg1 = {'source': 'deb $MIRROR $RELEASE multiverse',
- 'filename': self.aptlistfile}
- cfg2 = {'source': 'deb $MIRROR $RELEASE main',
- 'filename': self.aptlistfile2}
- cfg3 = {'source': 'deb $MIRROR $RELEASE universe',
- 'filename': self.aptlistfile3}
- fullv3 = {self.aptlistfile: {'filename': self.aptlistfile,
- 'source': 'deb $MIRROR $RELEASE '
- 'multiverse'},
- self.aptlistfile2: {'filename': self.aptlistfile2,
- 'source': 'deb $MIRROR $RELEASE main'},
- self.aptlistfile3: {'filename': self.aptlistfile3,
- 'source': 'deb $MIRROR $RELEASE '
- 'universe'}}
- cfg_3_only = {'apt': {'sources': fullv3}}
- cfg_1_and_3 = {'apt_sources': [cfg1, cfg2, cfg3]}
- cfg_1_and_3.update(cfg_3_only)
-
- # collision (equal, so ok to remove)
- newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3)
- self.assertEqual(newcfg, cfg_3_only)
- # no old spec (same result)
- newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_3_only)
- self.assertEqual(newcfg, cfg_3_only)
-
- diff = {self.aptlistfile: {'filename': self.aptlistfile,
- 'source': 'deb $MIRROR $RELEASE '
- 'DIFFERENTVERSE'},
- self.aptlistfile2: {'filename': self.aptlistfile2,
- 'source': 'deb $MIRROR $RELEASE main'},
- self.aptlistfile3: {'filename': self.aptlistfile3,
- 'source': 'deb $MIRROR $RELEASE '
- 'universe'}}
- cfg_3_only = {'apt': {'sources': diff}}
- cfg_1_and_3_different = {'apt_sources': [cfg1, cfg2, cfg3]}
- cfg_1_and_3_different.update(cfg_3_only)
-
- # collision (unequal by dict having a different entry)
- with self.assertRaises(ValueError):
- cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3_different)
-
- missing = {self.aptlistfile: {'filename': self.aptlistfile,
- 'source': 'deb $MIRROR $RELEASE '
- 'multiverse'}}
- cfg_3_only = {'apt': {'sources': missing}}
- cfg_1_and_3_missing = {'apt_sources': [cfg1, cfg2, cfg3]}
- cfg_1_and_3_missing.update(cfg_3_only)
- # collision (unequal by dict missing an entry)
- with self.assertRaises(ValueError):
- cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3_missing)
-
-
-# vi: ts=4 expandtab
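
One piece of the deleted v1 suite worth a thumbnail is the format conversion it covered: cc_apt_configure.convert_to_v3_apt_format() lifts the flat apt_sources list (and other apt_* keys) into the nested apt: dict keyed by output filename, and raises ValueError on conflicting old/new values. A minimal sketch based on test_convert_to_new_format above, with a placeholder filename:

    from cloudinit.config import cc_apt_configure

    old = {
        "apt_sources": [
            {"source": "deb $MIRROR $RELEASE multiverse",
             "filename": "/tmp/single-deb.list"}
        ]
    }

    new = cc_apt_configure.convert_to_v3_apt_format(old)
    # new["apt"]["sources"] == {
    #     "/tmp/single-deb.list": {
    #         "filename": "/tmp/single-deb.list",
    #         "source": "deb $MIRROR $RELEASE multiverse",
    #     }
    # }
    print(new["apt"]["sources"])
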
diff --git a/tests/unittests/test_handler/test_handler_apt_source_v3.py b/tests/unittests/test_handler/test_handler_apt_source_v3.py
deleted file mode 100644
index ac847238..00000000
--- a/tests/unittests/test_handler/test_handler_apt_source_v3.py
+++ /dev/null
@@ -1,1134 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""test_handler_apt_source_v3
-Testing various config variations of the apt_source custom config
-This tries to call all in the new v3 format and cares about new features
-"""
-import glob
-import os
-import re
-import shutil
-import socket
-import tempfile
-
-from unittest import TestCase, mock
-from unittest.mock import call
-
-from cloudinit import cloud
-from cloudinit import distros
-from cloudinit import gpg
-from cloudinit import helpers
-from cloudinit import subp
-from cloudinit import util
-
-from cloudinit.config import cc_apt_configure
-from cloudinit.sources import DataSourceNone
-
-from cloudinit.tests import helpers as t_help
-
-EXPECTEDKEY = u"""-----BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v1
-
-mI0ESuZLUgEEAKkqq3idtFP7g9hzOu1a8+v8ImawQN4TrvlygfScMU1TIS1eC7UQ
-NUA8Qqgr9iUaGnejb0VciqftLrU9D6WYHSKz+EITefgdyJ6SoQxjoJdsCpJ7o9Jy
-8PQnpRttiFm4qHu6BVnKnBNxw/z3ST9YMqW5kbMQpfxbGe+obRox59NpABEBAAG0
-HUxhdW5jaHBhZCBQUEEgZm9yIFNjb3R0IE1vc2VyiLYEEwECACAFAkrmS1ICGwMG
-CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRAGILvPA2g/d3aEA/9tVjc10HOZwV29
-OatVuTeERjjrIbxflO586GLA8cp0C9RQCwgod/R+cKYdQcHjbqVcP0HqxveLg0RZ
-FJpWLmWKamwkABErwQLGlM/Hwhjfade8VvEQutH5/0JgKHmzRsoqfR+LMO6OS+Sm
-S0ORP6HXET3+jC8BMG4tBWCTK/XEZw==
-=ACB2
------END PGP PUBLIC KEY BLOCK-----"""
-
-ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
-
-TARGET = None
-
-MOCK_LSB_RELEASE_DATA = {
- 'id': 'Ubuntu', 'description': 'Ubuntu 18.04.1 LTS',
- 'release': '18.04', 'codename': 'bionic'}
-
-
-class FakeDatasource:
- """Fake Datasource helper object"""
- def __init__(self):
- self.region = 'region'
-
-
-class FakeCloud:
- """Fake Cloud helper object"""
- def __init__(self):
- self.datasource = FakeDatasource()
-
-
-class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
- """TestAptSourceConfig
- Main Class to test apt configs
- """
- def setUp(self):
- super(TestAptSourceConfig, self).setUp()
- self.tmp = tempfile.mkdtemp()
- self.new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.tmp)
- self.addCleanup(shutil.rmtree, self.new_root)
- self.aptlistfile = os.path.join(self.tmp, "single-deb.list")
- self.aptlistfile2 = os.path.join(self.tmp, "single-deb2.list")
- self.aptlistfile3 = os.path.join(self.tmp, "single-deb3.list")
- self.join = os.path.join
- self.matcher = re.compile(ADD_APT_REPO_MATCH).search
- self.add_patch(
- 'cloudinit.config.cc_apt_configure.util.lsb_release',
- 'm_lsb_release', return_value=MOCK_LSB_RELEASE_DATA.copy())
-
- @staticmethod
- def _add_apt_sources(*args, **kwargs):
- with mock.patch.object(cc_apt_configure, 'update_packages'):
- cc_apt_configure.add_apt_sources(*args, **kwargs)
-
- @staticmethod
- def _get_default_params():
- """get_default_params
- Get the most basic default mirror and release info to be used in tests
- """
- params = {}
- params['RELEASE'] = MOCK_LSB_RELEASE_DATA['release']
- arch = 'amd64'
- params['MIRROR'] = cc_apt_configure.\
- get_default_mirrors(arch)["PRIMARY"]
- return params
-
- def _myjoin(self, *args, **kwargs):
- """_myjoin - redir into writable tmpdir"""
- if (args[0] == "/etc/apt/sources.list.d/" and
- args[1] == "cloud_config_sources.list" and
- len(args) == 2):
- return self.join(self.tmp, args[0].lstrip("/"), args[1])
- else:
- return self.join(*args, **kwargs)
-
- def _get_cloud(self, distro, metadata=None):
- self.patchUtils(self.new_root)
- paths = helpers.Paths({})
- cls = distros.fetch(distro)
- mydist = cls(distro, {}, paths)
- myds = DataSourceNone.DataSourceNone({}, mydist, paths)
- if metadata:
- myds.metadata.update(metadata)
- return cloud.Cloud(myds, paths, {}, mydist, None)
-
- def _apt_src_basic(self, filename, cfg):
- """_apt_src_basic
- Test Fix deb source string, has to overwrite mirror conf in params
- """
- params = self._get_default_params()
-
- self._add_apt_sources(cfg, TARGET, template_params=params,
- aa_repo_match=self.matcher)
-
- self.assertTrue(os.path.isfile(filename))
-
- contents = util.load_file(filename)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb", "http://test.ubuntu.com/ubuntu",
- "karmic-backports",
- "main universe multiverse restricted"),
- contents, flags=re.IGNORECASE))
-
- def test_apt_v3_src_basic(self):
- """test_apt_v3_src_basic - Test fix deb source string"""
- cfg = {self.aptlistfile: {'source':
- ('deb http://test.ubuntu.com/ubuntu'
- ' karmic-backports'
- ' main universe multiverse restricted')}}
- self._apt_src_basic(self.aptlistfile, cfg)
-
- def test_apt_v3_src_basic_tri(self):
- """test_apt_v3_src_basic_tri - Test multiple fix deb source strings"""
- cfg = {self.aptlistfile: {'source':
- ('deb http://test.ubuntu.com/ubuntu'
- ' karmic-backports'
- ' main universe multiverse restricted')},
- self.aptlistfile2: {'source':
- ('deb http://test.ubuntu.com/ubuntu'
- ' precise-backports'
- ' main universe multiverse restricted')},
- self.aptlistfile3: {'source':
- ('deb http://test.ubuntu.com/ubuntu'
- ' lucid-backports'
- ' main universe multiverse restricted')}}
- self._apt_src_basic(self.aptlistfile, cfg)
-
- # extra verify on two extra files of this test
- contents = util.load_file(self.aptlistfile2)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb", "http://test.ubuntu.com/ubuntu",
- "precise-backports",
- "main universe multiverse restricted"),
- contents, flags=re.IGNORECASE))
- contents = util.load_file(self.aptlistfile3)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb", "http://test.ubuntu.com/ubuntu",
- "lucid-backports",
- "main universe multiverse restricted"),
- contents, flags=re.IGNORECASE))
-
- def _apt_src_replacement(self, filename, cfg):
- """apt_src_replace
- Test Autoreplacement of MIRROR and RELEASE in source specs
- """
- params = self._get_default_params()
- self._add_apt_sources(cfg, TARGET, template_params=params,
- aa_repo_match=self.matcher)
-
- self.assertTrue(os.path.isfile(filename))
-
- contents = util.load_file(filename)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb", params['MIRROR'], params['RELEASE'],
- "multiverse"),
- contents, flags=re.IGNORECASE))
-
- def test_apt_v3_src_replace(self):
- """test_apt_v3_src_replace - Test replacement of MIRROR & RELEASE"""
- cfg = {self.aptlistfile: {'source': 'deb $MIRROR $RELEASE multiverse'}}
- self._apt_src_replacement(self.aptlistfile, cfg)
-
- def test_apt_v3_src_replace_fn(self):
- """test_apt_v3_src_replace_fn - Test filename overwritten in dict"""
- cfg = {'ignored': {'source': 'deb $MIRROR $RELEASE multiverse',
- 'filename': self.aptlistfile}}
- # second file should overwrite the dict key
- self._apt_src_replacement(self.aptlistfile, cfg)
-
- def _apt_src_replace_tri(self, cfg):
- """_apt_src_replace_tri
- Test three autoreplacements of MIRROR and RELEASE in source specs with
- generic part
- """
- self._apt_src_replacement(self.aptlistfile, cfg)
-
- # extra verify on two extra files of this test
- params = self._get_default_params()
- contents = util.load_file(self.aptlistfile2)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb", params['MIRROR'], params['RELEASE'],
- "main"),
- contents, flags=re.IGNORECASE))
- contents = util.load_file(self.aptlistfile3)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb", params['MIRROR'], params['RELEASE'],
- "universe"),
- contents, flags=re.IGNORECASE))
-
- def test_apt_v3_src_replace_tri(self):
- """test_apt_v3_src_replace_tri - Test multiple replace/overwrites"""
- cfg = {self.aptlistfile: {'source': 'deb $MIRROR $RELEASE multiverse'},
- 'notused': {'source': 'deb $MIRROR $RELEASE main',
- 'filename': self.aptlistfile2},
- self.aptlistfile3: {'source': 'deb $MIRROR $RELEASE universe'}}
- self._apt_src_replace_tri(cfg)
-
- def _apt_src_keyid(self, filename, cfg, keynum):
- """_apt_src_keyid
- Test specification of a source + keyid
- """
- params = self._get_default_params()
-
- with mock.patch("cloudinit.subp.subp",
- return_value=('fakekey 1234', '')) as mockobj:
- self._add_apt_sources(cfg, TARGET, template_params=params,
- aa_repo_match=self.matcher)
-
- # check if it added the right amount of keys
- calls = []
- for _ in range(keynum):
- calls.append(call(['apt-key', 'add', '-'], data=b'fakekey 1234',
- target=TARGET))
- mockobj.assert_has_calls(calls, any_order=True)
-
- self.assertTrue(os.path.isfile(filename))
-
- contents = util.load_file(filename)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb",
- ('http://ppa.launchpad.net/smoser/'
- 'cloud-init-test/ubuntu'),
- "xenial", "main"),
- contents, flags=re.IGNORECASE))
-
- def test_apt_v3_src_keyid(self):
- """test_apt_v3_src_keyid - Test source + keyid with filename"""
- cfg = {self.aptlistfile: {'source': ('deb '
- 'http://ppa.launchpad.net/'
- 'smoser/cloud-init-test/ubuntu'
- ' xenial main'),
- 'keyid': "03683F77"}}
- self._apt_src_keyid(self.aptlistfile, cfg, 1)
-
- def test_apt_v3_src_keyid_tri(self):
- """test_apt_v3_src_keyid_tri - Test multiple src+key+filen writes"""
- cfg = {self.aptlistfile: {'source': ('deb '
- 'http://ppa.launchpad.net/'
- 'smoser/cloud-init-test/ubuntu'
- ' xenial main'),
- 'keyid': "03683F77"},
- 'ignored': {'source': ('deb '
- 'http://ppa.launchpad.net/'
- 'smoser/cloud-init-test/ubuntu'
- ' xenial universe'),
- 'keyid': "03683F77",
- 'filename': self.aptlistfile2},
- self.aptlistfile3: {'source': ('deb '
- 'http://ppa.launchpad.net/'
- 'smoser/cloud-init-test/ubuntu'
- ' xenial multiverse'),
- 'keyid': "03683F77"}}
-
- self._apt_src_keyid(self.aptlistfile, cfg, 3)
- contents = util.load_file(self.aptlistfile2)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb",
- ('http://ppa.launchpad.net/smoser/'
- 'cloud-init-test/ubuntu'),
- "xenial", "universe"),
- contents, flags=re.IGNORECASE))
- contents = util.load_file(self.aptlistfile3)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb",
- ('http://ppa.launchpad.net/smoser/'
- 'cloud-init-test/ubuntu'),
- "xenial", "multiverse"),
- contents, flags=re.IGNORECASE))
-
- def test_apt_v3_src_key(self):
- """test_apt_v3_src_key - Test source + key"""
- params = self._get_default_params()
- cfg = {self.aptlistfile: {'source': ('deb '
- 'http://ppa.launchpad.net/'
- 'smoser/cloud-init-test/ubuntu'
- ' xenial main'),
- 'key': "fakekey 4321"}}
-
- with mock.patch.object(subp, 'subp') as mockobj:
- self._add_apt_sources(cfg, TARGET, template_params=params,
- aa_repo_match=self.matcher)
-
- mockobj.assert_any_call(['apt-key', 'add', '-'], data=b'fakekey 4321',
- target=TARGET)
-
- self.assertTrue(os.path.isfile(self.aptlistfile))
-
- contents = util.load_file(self.aptlistfile)
- self.assertTrue(re.search(r"%s %s %s %s\n" %
- ("deb",
- ('http://ppa.launchpad.net/smoser/'
- 'cloud-init-test/ubuntu'),
- "xenial", "main"),
- contents, flags=re.IGNORECASE))
-
- def test_apt_v3_src_keyonly(self):
- """test_apt_v3_src_keyonly - Test key without source"""
- params = self._get_default_params()
- cfg = {self.aptlistfile: {'key': "fakekey 4242"}}
-
- with mock.patch.object(subp, 'subp') as mockobj:
- self._add_apt_sources(cfg, TARGET, template_params=params,
- aa_repo_match=self.matcher)
-
- mockobj.assert_any_call(['apt-key', 'add', '-'], data=b'fakekey 4242',
- target=TARGET)
-
- # filename should be ignored on key only
- self.assertFalse(os.path.isfile(self.aptlistfile))
-
- def test_apt_v3_src_keyidonly(self):
- """test_apt_v3_src_keyidonly - Test keyid without source"""
- params = self._get_default_params()
- cfg = {self.aptlistfile: {'keyid': "03683F77"}}
-
- with mock.patch.object(subp, 'subp',
- return_value=('fakekey 1212', '')) as mockobj:
- self._add_apt_sources(cfg, TARGET, template_params=params,
- aa_repo_match=self.matcher)
-
- mockobj.assert_any_call(['apt-key', 'add', '-'], data=b'fakekey 1212',
- target=TARGET)
-
- # filename should be ignored on key only
- self.assertFalse(os.path.isfile(self.aptlistfile))
-
- def apt_src_keyid_real(self, cfg, expectedkey):
- """apt_src_keyid_real
- Test specification of a keyid without source including
- up to addition of the key (add_apt_key_raw mocked to keep the
- environment as is)
- """
- params = self._get_default_params()
-
- with mock.patch.object(cc_apt_configure, 'add_apt_key_raw') as mockkey:
- with mock.patch.object(gpg, 'getkeybyid',
- return_value=expectedkey) as mockgetkey:
- self._add_apt_sources(cfg, TARGET, template_params=params,
- aa_repo_match=self.matcher)
-
- keycfg = cfg[self.aptlistfile]
- mockgetkey.assert_called_with(keycfg['keyid'],
- keycfg.get('keyserver',
- 'keyserver.ubuntu.com'))
- mockkey.assert_called_with(expectedkey, TARGET)
-
- # filename should be ignored on key only
- self.assertFalse(os.path.isfile(self.aptlistfile))
-
- def test_apt_v3_src_keyid_real(self):
- """test_apt_v3_src_keyid_real - Test keyid including key add"""
- keyid = "03683F77"
- cfg = {self.aptlistfile: {'keyid': keyid}}
-
- self.apt_src_keyid_real(cfg, EXPECTEDKEY)
-
- def test_apt_v3_src_longkeyid_real(self):
- """test_apt_v3_src_longkeyid_real Test long keyid including key add"""
- keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77"
- cfg = {self.aptlistfile: {'keyid': keyid}}
-
- self.apt_src_keyid_real(cfg, EXPECTEDKEY)
-
- def test_apt_v3_src_longkeyid_ks_real(self):
- """test_apt_v3_src_longkeyid_ks_real Test long keyid from other ks"""
- keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77"
- cfg = {self.aptlistfile: {'keyid': keyid,
- 'keyserver': 'keys.gnupg.net'}}
-
- self.apt_src_keyid_real(cfg, EXPECTEDKEY)
-
- def test_apt_v3_src_keyid_keyserver(self):
- """test_apt_v3_src_keyid_keyserver - Test custom keyserver"""
- keyid = "03683F77"
- params = self._get_default_params()
- cfg = {self.aptlistfile: {'keyid': keyid,
- 'keyserver': 'test.random.com'}}
-
- # in some test environments only *.ubuntu.com is reachable
- # so mock the call and check if the config got there
- with mock.patch.object(gpg, 'getkeybyid',
- return_value="fakekey") as mockgetkey:
- with mock.patch.object(cc_apt_configure,
- 'add_apt_key_raw') as mockadd:
- self._add_apt_sources(cfg, TARGET, template_params=params,
- aa_repo_match=self.matcher)
-
- mockgetkey.assert_called_with('03683F77', 'test.random.com')
- mockadd.assert_called_with('fakekey', TARGET)
-
- # filename should be ignored on key only
- self.assertFalse(os.path.isfile(self.aptlistfile))
-
- def test_apt_v3_src_ppa(self):
- """test_apt_v3_src_ppa - Test specification of a ppa"""
- params = self._get_default_params()
- cfg = {self.aptlistfile: {'source': 'ppa:smoser/cloud-init-test'}}
-
- with mock.patch("cloudinit.subp.subp") as mockobj:
- self._add_apt_sources(cfg, TARGET, template_params=params,
- aa_repo_match=self.matcher)
- mockobj.assert_any_call(['add-apt-repository',
- 'ppa:smoser/cloud-init-test'], target=TARGET)
-
- # adding ppa should ignore filename (uses add-apt-repository)
- self.assertFalse(os.path.isfile(self.aptlistfile))
-
- def test_apt_v3_src_ppa_tri(self):
- """test_apt_v3_src_ppa_tri - Test specification of multiple ppa's"""
- params = self._get_default_params()
- cfg = {self.aptlistfile: {'source': 'ppa:smoser/cloud-init-test'},
- self.aptlistfile2: {'source': 'ppa:smoser/cloud-init-test2'},
- self.aptlistfile3: {'source': 'ppa:smoser/cloud-init-test3'}}
-
- with mock.patch("cloudinit.subp.subp") as mockobj:
- self._add_apt_sources(cfg, TARGET, template_params=params,
- aa_repo_match=self.matcher)
- calls = [call(['add-apt-repository', 'ppa:smoser/cloud-init-test'],
- target=TARGET),
- call(['add-apt-repository', 'ppa:smoser/cloud-init-test2'],
- target=TARGET),
- call(['add-apt-repository', 'ppa:smoser/cloud-init-test3'],
- target=TARGET)]
- mockobj.assert_has_calls(calls, any_order=True)
-
- # adding ppa should ignore all filenames (uses add-apt-repository)
- self.assertFalse(os.path.isfile(self.aptlistfile))
- self.assertFalse(os.path.isfile(self.aptlistfile2))
- self.assertFalse(os.path.isfile(self.aptlistfile3))
-
- @mock.patch("cloudinit.config.cc_apt_configure.util.get_dpkg_architecture")
- def test_apt_v3_list_rename(self, m_get_dpkg_architecture):
- """test_apt_v3_list_rename - Test find mirror and apt list renaming"""
- pre = "/var/lib/apt/lists"
- # filenames are archive-dependent
-
- arch = 's390x'
- m_get_dpkg_architecture.return_value = arch
- component = "ubuntu-ports"
- archive = "ports.ubuntu.com"
-
- cfg = {'primary': [{'arches': ["default"],
- 'uri':
- 'http://test.ubuntu.com/%s/' % component}],
- 'security': [{'arches': ["default"],
- 'uri':
- 'http://testsec.ubuntu.com/%s/' % component}]}
- post = ("%s_dists_%s-updates_InRelease" %
- (component, MOCK_LSB_RELEASE_DATA['codename']))
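- # the list file generated for the default ports.ubuntu.com archive
- # should be renamed to match the configured test.ubuntu.com mirror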
- fromfn = ("%s/%s_%s" % (pre, archive, post))
- tofn = ("%s/test.ubuntu.com_%s" % (pre, post))
-
- mirrors = cc_apt_configure.find_apt_mirror_info(cfg, FakeCloud(), arch)
-
- self.assertEqual(mirrors['MIRROR'],
- "http://test.ubuntu.com/%s/" % component)
- self.assertEqual(mirrors['PRIMARY'],
- "http://test.ubuntu.com/%s/" % component)
- self.assertEqual(mirrors['SECURITY'],
- "http://testsec.ubuntu.com/%s/" % component)
-
- with mock.patch.object(os, 'rename') as mockren:
- with mock.patch.object(glob, 'glob',
- return_value=[fromfn]):
- cc_apt_configure.rename_apt_lists(mirrors, TARGET, arch)
-
- mockren.assert_any_call(fromfn, tofn)
-
- @mock.patch("cloudinit.config.cc_apt_configure.util.get_dpkg_architecture")
- def test_apt_v3_list_rename_non_slash(self, m_get_dpkg_architecture):
- target = os.path.join(self.tmp, "rename_non_slash")
- apt_lists_d = os.path.join(target, "./" + cc_apt_configure.APT_LISTS)
-
- arch = 'amd64'
- m_get_dpkg_architecture.return_value = arch
-
- mirror_path = "some/random/path/"
- primary = "http://test.ubuntu.com/" + mirror_path
- security = "http://test-security.ubuntu.com/" + mirror_path
- mirrors = {'PRIMARY': primary, 'SECURITY': security}
-
- # these match default archive prefixes
- opri_pre = "archive.ubuntu.com_ubuntu_dists_xenial"
- osec_pre = "security.ubuntu.com_ubuntu_dists_xenial"
- # this one won't match the defaults and should not be renamed
- other_pre = "dl.google.com_linux_chrome_deb_dists_stable"
- # these are our new expected prefixes
- npri_pre = "test.ubuntu.com_some_random_path_dists_xenial"
- nsec_pre = "test-security.ubuntu.com_some_random_path_dists_xenial"
-
- files = [
- # orig prefix, new prefix, suffix
- (opri_pre, npri_pre, "_main_binary-amd64_Packages"),
- (opri_pre, npri_pre, "_main_binary-amd64_InRelease"),
- (opri_pre, npri_pre, "-updates_main_binary-amd64_Packages"),
- (opri_pre, npri_pre, "-updates_main_binary-amd64_InRelease"),
- (other_pre, other_pre, "_main_binary-amd64_Packages"),
- (other_pre, other_pre, "_Release"),
- (other_pre, other_pre, "_Release.gpg"),
- (osec_pre, nsec_pre, "_InRelease"),
- (osec_pre, nsec_pre, "_main_binary-amd64_Packages"),
- (osec_pre, nsec_pre, "_universe_binary-amd64_Packages"),
- ]
-
- expected = sorted([npre + suff for opre, npre, suff in files])
- # create files
- for (opre, _npre, suff) in files:
- fpath = os.path.join(apt_lists_d, opre + suff)
- util.write_file(fpath, content=fpath)
-
- cc_apt_configure.rename_apt_lists(mirrors, target, arch)
- found = sorted(os.listdir(apt_lists_d))
- self.assertEqual(expected, found)
-
- @staticmethod
- def test_apt_v3_proxy():
- """test_apt_v3_proxy - Test apt_*proxy configuration"""
- cfg = {"proxy": "foobar1",
- "http_proxy": "foobar2",
- "ftp_proxy": "foobar3",
- "https_proxy": "foobar4"}
-
- with mock.patch.object(util, 'write_file') as mockobj:
- cc_apt_configure.apply_apt_config(cfg, "proxyfn", "notused")
-
- mockobj.assert_called_with('proxyfn',
- ('Acquire::http::Proxy "foobar1";\n'
- 'Acquire::http::Proxy "foobar2";\n'
- 'Acquire::ftp::Proxy "foobar3";\n'
- 'Acquire::https::Proxy "foobar4";\n'))
-
- def test_apt_v3_mirror(self):
- """test_apt_v3_mirror - Test defining a mirror"""
- pmir = "http://us.archive.ubuntu.com/ubuntu/"
- smir = "http://security.ubuntu.com/ubuntu/"
- cfg = {"primary": [{'arches': ["default"],
- "uri": pmir}],
- "security": [{'arches': ["default"],
- "uri": smir}]}
-
- mirrors = cc_apt_configure.find_apt_mirror_info(
- cfg, FakeCloud(), 'amd64')
-
- self.assertEqual(mirrors['MIRROR'],
- pmir)
- self.assertEqual(mirrors['PRIMARY'],
- pmir)
- self.assertEqual(mirrors['SECURITY'],
- smir)
-
- def test_apt_v3_mirror_default(self):
- """test_apt_v3_mirror_default - Test without defining a mirror"""
- arch = 'amd64'
- default_mirrors = cc_apt_configure.get_default_mirrors(arch)
- pmir = default_mirrors["PRIMARY"]
- smir = default_mirrors["SECURITY"]
- mycloud = self._get_cloud('ubuntu')
- mirrors = cc_apt_configure.find_apt_mirror_info({}, mycloud, arch)
-
- self.assertEqual(mirrors['MIRROR'],
- pmir)
- self.assertEqual(mirrors['PRIMARY'],
- pmir)
- self.assertEqual(mirrors['SECURITY'],
- smir)
-
- def test_apt_v3_mirror_arches(self):
- """test_apt_v3_mirror_arches - Test arches selection of mirror"""
- pmir = "http://my-primary.ubuntu.com/ubuntu/"
- smir = "http://my-security.ubuntu.com/ubuntu/"
- arch = 'ppc64el'
- cfg = {"primary": [{'arches': ["default"], "uri": "notthis-primary"},
- {'arches': [arch], "uri": pmir}],
- "security": [{'arches': ["default"], "uri": "nothis-security"},
- {'arches': [arch], "uri": smir}]}
-
- mirrors = cc_apt_configure.find_apt_mirror_info(cfg, FakeCloud(), arch)
-
- self.assertEqual(mirrors['PRIMARY'], pmir)
- self.assertEqual(mirrors['MIRROR'], pmir)
- self.assertEqual(mirrors['SECURITY'], smir)
-
- def test_apt_v3_mirror_arches_default(self):
- """test_apt_v3_mirror_arches - Test falling back to default arch"""
- pmir = "http://us.archive.ubuntu.com/ubuntu/"
- smir = "http://security.ubuntu.com/ubuntu/"
- cfg = {"primary": [{'arches': ["default"],
- "uri": pmir},
- {'arches': ["thisarchdoesntexist"],
- "uri": "notthis"}],
- "security": [{'arches': ["thisarchdoesntexist"],
- "uri": "nothat"},
- {'arches': ["default"],
- "uri": smir}]}
-
- mirrors = cc_apt_configure.find_apt_mirror_info(
- cfg, FakeCloud(), 'amd64')
-
- self.assertEqual(mirrors['MIRROR'],
- pmir)
- self.assertEqual(mirrors['PRIMARY'],
- pmir)
- self.assertEqual(mirrors['SECURITY'],
- smir)
-
- @mock.patch("cloudinit.config.cc_apt_configure.util.get_dpkg_architecture")
- def test_apt_v3_get_def_mir_non_intel_no_arch(
- self, m_get_dpkg_architecture
- ):
- arch = 'ppc64el'
- m_get_dpkg_architecture.return_value = arch
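- # with no arch argument, get_default_mirrors falls back to the
- # (mocked) dpkg architecture, so the ports mirrors are expected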
- expected = {'PRIMARY': 'http://ports.ubuntu.com/ubuntu-ports',
- 'SECURITY': 'http://ports.ubuntu.com/ubuntu-ports'}
- self.assertEqual(expected, cc_apt_configure.get_default_mirrors())
-
- def test_apt_v3_get_default_mirrors_non_intel_with_arch(self):
- found = cc_apt_configure.get_default_mirrors('ppc64el')
-
- expected = {'PRIMARY': 'http://ports.ubuntu.com/ubuntu-ports',
- 'SECURITY': 'http://ports.ubuntu.com/ubuntu-ports'}
- self.assertEqual(expected, found)
-
- def test_apt_v3_mirror_arches_sysdefault(self):
- """test_apt_v3_mirror_arches - Test arches fallback to sys default"""
- arch = 'amd64'
- default_mirrors = cc_apt_configure.get_default_mirrors(arch)
- pmir = default_mirrors["PRIMARY"]
- smir = default_mirrors["SECURITY"]
- mycloud = self._get_cloud('ubuntu')
- cfg = {"primary": [{'arches': ["thisarchdoesntexist_64"],
- "uri": "notthis"},
- {'arches': ["thisarchdoesntexist"],
- "uri": "notthiseither"}],
- "security": [{'arches': ["thisarchdoesntexist"],
- "uri": "nothat"},
- {'arches': ["thisarchdoesntexist_64"],
- "uri": "nothateither"}]}
-
- mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch)
-
- self.assertEqual(mirrors['MIRROR'], pmir)
- self.assertEqual(mirrors['PRIMARY'], pmir)
- self.assertEqual(mirrors['SECURITY'], smir)
-
- def test_apt_v3_mirror_search(self):
- """test_apt_v3_mirror_search - Test searching mirrors in a list
- mock checks to avoid relying on network connectivity"""
- pmir = "http://us.archive.ubuntu.com/ubuntu/"
- smir = "http://security.ubuntu.com/ubuntu/"
- cfg = {"primary": [{'arches': ["default"],
- "search": ["pfailme", pmir]}],
- "security": [{'arches': ["default"],
- "search": ["sfailme", smir]}]}
-
- with mock.patch.object(cc_apt_configure.util, 'search_for_mirror',
- side_effect=[pmir, smir]) as mocksearch:
- mirrors = cc_apt_configure.find_apt_mirror_info(cfg, FakeCloud(),
- 'amd64')
-
- calls = [call(["pfailme", pmir]),
- call(["sfailme", smir])]
- mocksearch.assert_has_calls(calls)
-
- self.assertEqual(mirrors['MIRROR'],
- pmir)
- self.assertEqual(mirrors['PRIMARY'],
- pmir)
- self.assertEqual(mirrors['SECURITY'],
- smir)
-
- def test_apt_v3_mirror_search_many2(self):
- """test_apt_v3_mirror_search_many3 - Test both mirrors specs at once"""
- pmir = "http://us.archive.ubuntu.com/ubuntu/"
- smir = "http://security.ubuntu.com/ubuntu/"
- cfg = {"primary": [{'arches': ["default"],
- "uri": pmir,
- "search": ["pfailme", "foo"]}],
- "security": [{'arches': ["default"],
- "uri": smir,
- "search": ["sfailme", "bar"]}]}
-
- arch = 'amd64'
-
- # should be called only once per type, despite two mirror configs
- mycloud = None
- with mock.patch.object(cc_apt_configure, 'get_mirror',
- return_value="http://mocked/foo") as mockgm:
- mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch)
- calls = [call(cfg, 'primary', arch, mycloud),
- call(cfg, 'security', arch, mycloud)]
- mockgm.assert_has_calls(calls)
-
- # search_for_mirror should not be called, since a uri is specified
- with mock.patch.object(cc_apt_configure.util,
- 'search_for_mirror') as mockse:
- mirrors = cc_apt_configure.find_apt_mirror_info(
- cfg, FakeCloud(), arch)
- mockse.assert_not_called()
-
- self.assertEqual(mirrors['MIRROR'],
- pmir)
- self.assertEqual(mirrors['PRIMARY'],
- pmir)
- self.assertEqual(mirrors['SECURITY'],
- smir)
-
- def test_apt_v3_url_resolvable(self):
- """test_apt_v3_url_resolvable - Test resolving urls"""
-
- with mock.patch.object(util, 'is_resolvable') as mockresolve:
- util.is_resolvable_url("http://1.2.3.4/ubuntu")
- mockresolve.assert_called_with("1.2.3.4")
-
- with mock.patch.object(util, 'is_resolvable') as mockresolve:
- util.is_resolvable_url("http://us.archive.ubuntu.com/ubuntu")
- mockresolve.assert_called_with("us.archive.ubuntu.com")
-
- # former tests can leave this set (or not if the test is run directly)
- # do a hard reset to ensure a stable result
- util._DNS_REDIRECT_IP = None
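- # fake getaddrinfo results: 'bad' feeds the invalid-name probes used
- # for redirect detection, 'good' the real lookups that should resolve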
- bad = [(None, None, None, "badname", ["10.3.2.1"])]
- good = [(None, None, None, "goodname", ["10.2.3.4"])]
- with mock.patch.object(socket, 'getaddrinfo',
- side_effect=[bad, bad, bad, good,
- good]) as mocksock:
- ret = util.is_resolvable_url("http://us.archive.ubuntu.com/ubuntu")
- ret2 = util.is_resolvable_url("http://1.2.3.4/ubuntu")
- mocksock.assert_any_call('does-not-exist.example.com.', None,
- 0, 0, 1, 2)
- mocksock.assert_any_call('example.invalid.', None, 0, 0, 1, 2)
- mocksock.assert_any_call('us.archive.ubuntu.com', None)
- mocksock.assert_any_call('1.2.3.4', None)
-
- self.assertTrue(ret)
- self.assertTrue(ret2)
-
- # the side effect only needs a bad result after the initial call
- with mock.patch.object(socket, 'getaddrinfo',
- side_effect=[bad]) as mocksock:
- ret3 = util.is_resolvable_url("http://failme.com/ubuntu")
- calls = [call('failme.com', None)]
- mocksock.assert_has_calls(calls)
- self.assertFalse(ret3)
-
- def test_apt_v3_disable_suites(self):
- """test_disable_suites - disable_suites with many configurations"""
- release = "xenial"
- orig = """deb http://ubuntu.com//ubuntu xenial main
-deb http://ubuntu.com//ubuntu xenial-updates main
-deb http://ubuntu.com//ubuntu xenial-security main
-deb-src http://ubuntu.com//ubuntu universe multiverse
-deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
-
- # disable nothing
- disabled = []
- expect = """deb http://ubuntu.com//ubuntu xenial main
-deb http://ubuntu.com//ubuntu xenial-updates main
-deb http://ubuntu.com//ubuntu xenial-security main
-deb-src http://ubuntu.com//ubuntu universe multiverse
-deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
- result = cc_apt_configure.disable_suites(disabled, orig, release)
- self.assertEqual(expect, result)
-
- # single disable release suite
- disabled = ["$RELEASE"]
- expect = """\
-# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu xenial main
-deb http://ubuntu.com//ubuntu xenial-updates main
-deb http://ubuntu.com//ubuntu xenial-security main
-deb-src http://ubuntu.com//ubuntu universe multiverse
-deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
- result = cc_apt_configure.disable_suites(disabled, orig, release)
- self.assertEqual(expect, result)
-
- # single disable other suite
- disabled = ["$RELEASE-updates"]
- expect = ("""deb http://ubuntu.com//ubuntu xenial main
-# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu"""
- """ xenial-updates main
-deb http://ubuntu.com//ubuntu xenial-security main
-deb-src http://ubuntu.com//ubuntu universe multiverse
-deb http://ubuntu.com/ubuntu/ xenial-proposed main""")
- result = cc_apt_configure.disable_suites(disabled, orig, release)
- self.assertEqual(expect, result)
-
- # multi disable
- disabled = ["$RELEASE-updates", "$RELEASE-security"]
- expect = ("""deb http://ubuntu.com//ubuntu xenial main
-# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
- """xenial-updates main
-# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
- """xenial-security main
-deb-src http://ubuntu.com//ubuntu universe multiverse
-deb http://ubuntu.com/ubuntu/ xenial-proposed main""")
- result = cc_apt_configure.disable_suites(disabled, orig, release)
- self.assertEqual(expect, result)
-
- # multi line disable (same suite multiple times in input)
- disabled = ["$RELEASE-updates", "$RELEASE-security"]
- orig = """deb http://ubuntu.com//ubuntu xenial main
-deb http://ubuntu.com//ubuntu xenial-updates main
-deb http://ubuntu.com//ubuntu xenial-security main
-deb-src http://ubuntu.com//ubuntu universe multiverse
-deb http://UBUNTU.com//ubuntu xenial-updates main
-deb http://UBUNTU.COM//ubuntu xenial-updates main
-deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
- expect = ("""deb http://ubuntu.com//ubuntu xenial main
-# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
- """xenial-updates main
-# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
- """xenial-security main
-deb-src http://ubuntu.com//ubuntu universe multiverse
-# suite disabled by cloud-init: deb http://UBUNTU.com//ubuntu """
- """xenial-updates main
-# suite disabled by cloud-init: deb http://UBUNTU.COM//ubuntu """
- """xenial-updates main
-deb http://ubuntu.com/ubuntu/ xenial-proposed main""")
- result = cc_apt_configure.disable_suites(disabled, orig, release)
- self.assertEqual(expect, result)
-
- # comment in input
- disabled = ["$RELEASE-updates", "$RELEASE-security"]
- orig = """deb http://ubuntu.com//ubuntu xenial main
-deb http://ubuntu.com//ubuntu xenial-updates main
-deb http://ubuntu.com//ubuntu xenial-security main
-deb-src http://ubuntu.com//ubuntu universe multiverse
-#foo
-#deb http://UBUNTU.com//ubuntu xenial-updates main
-deb http://UBUNTU.COM//ubuntu xenial-updates main
-deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
- expect = ("""deb http://ubuntu.com//ubuntu xenial main
-# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
- """xenial-updates main
-# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
- """xenial-security main
-deb-src http://ubuntu.com//ubuntu universe multiverse
-#foo
-#deb http://UBUNTU.com//ubuntu xenial-updates main
-# suite disabled by cloud-init: deb http://UBUNTU.COM//ubuntu """
- """xenial-updates main
-deb http://ubuntu.com/ubuntu/ xenial-proposed main""")
- result = cc_apt_configure.disable_suites(disabled, orig, release)
- self.assertEqual(expect, result)
-
- # single disable custom suite
- disabled = ["foobar"]
- orig = """deb http://ubuntu.com//ubuntu xenial main
-deb http://ubuntu.com//ubuntu xenial-updates main
-deb http://ubuntu.com//ubuntu xenial-security main
-deb http://ubuntu.com/ubuntu/ foobar main"""
- expect = """deb http://ubuntu.com//ubuntu xenial main
-deb http://ubuntu.com//ubuntu xenial-updates main
-deb http://ubuntu.com//ubuntu xenial-security main
-# suite disabled by cloud-init: deb http://ubuntu.com/ubuntu/ foobar main"""
- result = cc_apt_configure.disable_suites(disabled, orig, release)
- self.assertEqual(expect, result)
-
- # single disable of a non-existing suite
- disabled = ["foobar"]
- orig = """deb http://ubuntu.com//ubuntu xenial main
-deb http://ubuntu.com//ubuntu xenial-updates main
-deb http://ubuntu.com//ubuntu xenial-security main
-deb http://ubuntu.com/ubuntu/ notfoobar main"""
- expect = """deb http://ubuntu.com//ubuntu xenial main
-deb http://ubuntu.com//ubuntu xenial-updates main
-deb http://ubuntu.com//ubuntu xenial-security main
-deb http://ubuntu.com/ubuntu/ notfoobar main"""
- result = cc_apt_configure.disable_suites(disabled, orig, release)
- self.assertEqual(expect, result)
-
- # single disable suite with option
- disabled = ["$RELEASE-updates"]
- orig = """deb http://ubuntu.com//ubuntu xenial main
-deb [a=b] http://ubu.com//ubu xenial-updates main
-deb http://ubuntu.com//ubuntu xenial-security main
-deb-src http://ubuntu.com//ubuntu universe multiverse
-deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
- expect = ("""deb http://ubuntu.com//ubuntu xenial main
-# suite disabled by cloud-init: deb [a=b] http://ubu.com//ubu """
- """xenial-updates main
-deb http://ubuntu.com//ubuntu xenial-security main
-deb-src http://ubuntu.com//ubuntu universe multiverse
-deb http://ubuntu.com/ubuntu/ xenial-proposed main""")
- result = cc_apt_configure.disable_suites(disabled, orig, release)
- self.assertEqual(expect, result)
-
- # single disable suite with more options and auto $RELEASE expansion
- disabled = ["updates"]
- orig = """deb http://ubuntu.com//ubuntu xenial main
-deb [a=b c=d] http://ubu.com//ubu xenial-updates main
-deb http://ubuntu.com//ubuntu xenial-security main
-deb-src http://ubuntu.com//ubuntu universe multiverse
-deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
- expect = """deb http://ubuntu.com//ubuntu xenial main
-# suite disabled by cloud-init: deb [a=b c=d] \
-http://ubu.com//ubu xenial-updates main
-deb http://ubuntu.com//ubuntu xenial-security main
-deb-src http://ubuntu.com//ubuntu universe multiverse
-deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
- result = cc_apt_configure.disable_suites(disabled, orig, release)
- self.assertEqual(expect, result)
-
- # single disable of one suite while options are present on others
- disabled = ["$RELEASE-security"]
- orig = """deb http://ubuntu.com//ubuntu xenial main
-deb [arch=foo] http://ubuntu.com//ubuntu xenial-updates main
-deb http://ubuntu.com//ubuntu xenial-security main
-deb-src http://ubuntu.com//ubuntu universe multiverse
-deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
- expect = ("""deb http://ubuntu.com//ubuntu xenial main
-deb [arch=foo] http://ubuntu.com//ubuntu xenial-updates main
-# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
- """xenial-security main
-deb-src http://ubuntu.com//ubuntu universe multiverse
-deb http://ubuntu.com/ubuntu/ xenial-proposed main""")
- result = cc_apt_configure.disable_suites(disabled, orig, release)
- self.assertEqual(expect, result)
-
- def test_disable_suites_blank_lines(self):
- """test_disable_suites_blank_lines - ensure blank lines allowed"""
- lines = ["deb %(repo)s %(rel)s main universe",
- "",
- "deb %(repo)s %(rel)s-updates main universe",
- " # random comment",
- "#comment here",
- ""]
- rel = "trusty"
- repo = 'http://example.com/mirrors/ubuntu'
- orig = "\n".join(lines) % {'repo': repo, 'rel': rel}
- self.assertEqual(
- orig, cc_apt_configure.disable_suites(["proposed"], orig, rel))
-
- @mock.patch("cloudinit.util.get_hostname", return_value='abc.localdomain')
- def test_apt_v3_mirror_search_dns(self, m_get_hostname):
- """test_apt_v3_mirror_search_dns - Test searching dns patterns"""
- pmir = "phit"
- smir = "shit"
- arch = 'amd64'
- mycloud = self._get_cloud('ubuntu')
- cfg = {"primary": [{'arches': ["default"],
- "search_dns": True}],
- "security": [{'arches': ["default"],
- "search_dns": True}]}
-
- with mock.patch.object(cc_apt_configure, 'get_mirror',
- return_value="http://mocked/foo") as mockgm:
- mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch)
- calls = [call(cfg, 'primary', arch, mycloud),
- call(cfg, 'security', arch, mycloud)]
- mockgm.assert_has_calls(calls)
-
- with mock.patch.object(cc_apt_configure, 'search_for_mirror_dns',
- return_value="http://mocked/foo") as mocksdns:
- mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch)
- calls = [call(True, 'primary', cfg, mycloud),
- call(True, 'security', cfg, mycloud)]
- mocksdns.assert_has_calls(calls)
-
- # the first return value covers the non-dns search call made above
- with mock.patch.object(cc_apt_configure.util, 'search_for_mirror',
- side_effect=[None, pmir, None, smir]) as mockse:
- mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch)
-
- calls = [call(None),
- call(['http://ubuntu-mirror.localdomain/ubuntu',
- 'http://ubuntu-mirror/ubuntu']),
- call(None),
- call(['http://ubuntu-security-mirror.localdomain/ubuntu',
- 'http://ubuntu-security-mirror/ubuntu'])]
- mockse.assert_has_calls(calls)
-
- self.assertEqual(mirrors['MIRROR'],
- pmir)
- self.assertEqual(mirrors['PRIMARY'],
- pmir)
- self.assertEqual(mirrors['SECURITY'],
- smir)
-
-
-class TestDebconfSelections(TestCase):
-
- @mock.patch("cloudinit.config.cc_apt_configure.subp.subp")
- def test_set_sel_appends_newline_if_absent(self, m_subp):
- """Automatically append a newline to debconf-set-selections config."""
- selections = b'some/setting boolean true'
- cc_apt_configure.debconf_set_selections(selections=selections)
- cc_apt_configure.debconf_set_selections(selections=selections + b'\n')
- m_call = mock.call(
- ['debconf-set-selections'], data=selections + b'\n', capture=True,
- target=None)
- self.assertEqual([m_call, m_call], m_subp.call_args_list)
-
- @mock.patch("cloudinit.config.cc_apt_configure.debconf_set_selections")
- def test_no_set_sel_if_none_to_set(self, m_set_sel):
- cc_apt_configure.apply_debconf_selections({'foo': 'bar'})
- m_set_sel.assert_not_called()
-
- @mock.patch("cloudinit.config.cc_apt_configure."
- "debconf_set_selections")
- @mock.patch("cloudinit.config.cc_apt_configure."
- "util.get_installed_packages")
- def test_set_sel_call_has_expected_input(self, m_get_inst, m_set_sel):
- data = {
- 'set1': 'pkga pkga/q1 mybool false',
- 'set2': ('pkgb\tpkgb/b1\tstr\tthis is a string\n'
- 'pkgc\tpkgc/ip\tstring\t10.0.0.1')}
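- # every individual selection line should appear in the blob passed
- # to debconf_set_selections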
- lines = '\n'.join(data.values()).split('\n')
-
- m_get_inst.return_value = ["adduser", "apparmor"]
- m_set_sel.return_value = None
-
- cc_apt_configure.apply_debconf_selections({'debconf_selections': data})
- self.assertTrue(m_get_inst.called)
- self.assertEqual(m_set_sel.call_count, 1)
-
- # assumes called with *args value.
- selections = m_set_sel.call_args_list[0][0][0].decode()
-
- missing = [
- line for line in lines if line not in selections.splitlines()
- ]
- self.assertEqual([], missing)
-
- @mock.patch("cloudinit.config.cc_apt_configure.dpkg_reconfigure")
- @mock.patch("cloudinit.config.cc_apt_configure.debconf_set_selections")
- @mock.patch("cloudinit.config.cc_apt_configure."
- "util.get_installed_packages")
- def test_reconfigure_if_intersection(self, m_get_inst, m_set_sel,
- m_dpkg_r):
- data = {
- 'set1': 'pkga pkga/q1 mybool false',
- 'set2': ('pkgb\tpkgb/b1\tstr\tthis is a string\n'
- 'pkgc\tpkgc/ip\tstring\t10.0.0.1'),
- 'cloud-init': ('cloud-init cloud-init/datasources '
- 'multiselect MAAS')}
-
- m_set_sel.return_value = None
- m_get_inst.return_value = ["adduser", "apparmor", "pkgb",
- "cloud-init", 'zdog']
-
- cc_apt_configure.apply_debconf_selections({'debconf_selections': data})
-
- # reconfigure should be called with the intersection
- # of (packages in config, packages installed)
- self.assertEqual(m_dpkg_r.call_count, 1)
- # assumes called with *args (dpkg_reconfigure([a,b,c], target=))
- packages = m_dpkg_r.call_args_list[0][0][0]
- self.assertEqual(set(['cloud-init', 'pkgb']), set(packages))
-
- @mock.patch("cloudinit.config.cc_apt_configure.dpkg_reconfigure")
- @mock.patch("cloudinit.config.cc_apt_configure.debconf_set_selections")
- @mock.patch("cloudinit.config.cc_apt_configure."
- "util.get_installed_packages")
- def test_reconfigure_if_no_intersection(self, m_get_inst, m_set_sel,
- m_dpkg_r):
- data = {'set1': 'pkga pkga/q1 mybool false'}
-
- m_get_inst.return_value = ["adduser", "apparmor", "pkgb",
- "cloud-init", 'zdog']
- m_set_sel.return_value = None
-
- cc_apt_configure.apply_debconf_selections({'debconf_selections': data})
-
- self.assertTrue(m_get_inst.called)
- self.assertEqual(m_dpkg_r.call_count, 0)
-
- @mock.patch("cloudinit.config.cc_apt_configure.subp.subp")
- def test_dpkg_reconfigure_does_reconfigure(self, m_subp):
- target = "/foo-target"
-
- # due to the way the cleaners are called (via dictionary reference)
- # mocking clean_cloud_init directly does not work. So we mock
- # the CONFIG_CLEANERS dictionary and assert our cleaner is called.
- ci_cleaner = mock.MagicMock()
- with mock.patch.dict(("cloudinit.config.cc_apt_configure."
- "CONFIG_CLEANERS"),
- values={'cloud-init': ci_cleaner}, clear=True):
- cc_apt_configure.dpkg_reconfigure(['pkga', 'cloud-init'],
- target=target)
- # cloud-init is actually the only package we have a cleaner for,
- # so for now it is the only one that should be reconfigured
- self.assertTrue(m_subp.called)
- ci_cleaner.assert_called_with(target)
- self.assertEqual(m_subp.call_count, 1)
- found = m_subp.call_args_list[0][0][0]
- expected = ['dpkg-reconfigure', '--frontend=noninteractive',
- 'cloud-init']
- self.assertEqual(expected, found)
-
- @mock.patch("cloudinit.config.cc_apt_configure.subp.subp")
- def test_dpkg_reconfigure_not_done_on_no_data(self, m_subp):
- cc_apt_configure.dpkg_reconfigure([])
- m_subp.assert_not_called()
-
- @mock.patch("cloudinit.config.cc_apt_configure.subp.subp")
- def test_dpkg_reconfigure_not_done_if_no_cleaners(self, m_subp):
- cc_apt_configure.dpkg_reconfigure(['pkgfoo', 'pkgbar'])
- m_subp.assert_not_called()
-
-#
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_bootcmd.py b/tests/unittests/test_handler/test_handler_bootcmd.py
deleted file mode 100644
index b53d60d4..00000000
--- a/tests/unittests/test_handler/test_handler_bootcmd.py
+++ /dev/null
@@ -1,161 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.config.cc_bootcmd import handle, schema
-from cloudinit.sources import DataSourceNone
-from cloudinit import (distros, helpers, cloud, subp, util)
-from cloudinit.tests.helpers import (
- CiTestCase, mock, SchemaTestCaseMixin, skipUnlessJsonSchema)
-
-import logging
-import tempfile
-
-
-LOG = logging.getLogger(__name__)
-
-
-class FakeExtendedTempFile(object):
- def __init__(self, suffix):
- self.suffix = suffix
- self.handle = tempfile.NamedTemporaryFile(
- prefix="ci-%s." % self.__class__.__name__, delete=False)
-
- def __enter__(self):
- return self.handle
-
- def __exit__(self, exc_type, exc_value, traceback):
- self.handle.close()
- util.del_file(self.handle.name)
-
-
-class TestBootcmd(CiTestCase):
-
- with_logs = True
-
- _etmpfile_path = ('cloudinit.config.cc_bootcmd.temp_utils.'
- 'ExtendedTemporaryFile')
-
- def setUp(self):
- super(TestBootcmd, self).setUp()
- self.subp = subp.subp
- self.new_root = self.tmp_dir()
-
- def _get_cloud(self, distro):
- paths = helpers.Paths({})
- cls = distros.fetch(distro)
- mydist = cls(distro, {}, paths)
- myds = DataSourceNone.DataSourceNone({}, mydist, paths)
- paths.datasource = myds
- return cloud.Cloud(myds, paths, {}, mydist, None)
-
- def test_handler_skip_if_no_bootcmd(self):
- """When the provided config doesn't contain bootcmd, skip it."""
- cfg = {}
- mycloud = self._get_cloud('ubuntu')
- handle('notimportant', cfg, mycloud, LOG, None)
- self.assertIn(
- "Skipping module named notimportant, no 'bootcmd' key",
- self.logs.getvalue())
-
- def test_handler_invalid_command_set(self):
- """Commands which can't be converted to shell will raise errors."""
- invalid_config = {'bootcmd': 1}
- cc = self._get_cloud('ubuntu')
- with self.assertRaises(TypeError) as context_manager:
- handle('cc_bootcmd', invalid_config, cc, LOG, [])
- self.assertIn('Failed to shellify bootcmd', self.logs.getvalue())
- self.assertEqual(
- "Input to shellify was type 'int'. Expected list or tuple.",
- str(context_manager.exception))
-
- @skipUnlessJsonSchema()
- def test_handler_schema_validation_warns_non_array_type(self):
- """Schema validation warns of non-array type for bootcmd key.
-
- Schema validation is not strict, so bootcmd attempts to shellify the
- invalid content.
- """
- invalid_config = {'bootcmd': 1}
- cc = self._get_cloud('ubuntu')
- with self.assertRaises(TypeError):
- handle('cc_bootcmd', invalid_config, cc, LOG, [])
- self.assertIn(
- 'Invalid config:\nbootcmd: 1 is not of type \'array\'',
- self.logs.getvalue())
- self.assertIn('Failed to shellify', self.logs.getvalue())
-
- @skipUnlessJsonSchema()
- def test_handler_schema_validation_warns_non_array_item_type(self):
- """Schema validation warns of non-array or string bootcmd items.
-
- Schema validation is not strict, so bootcmd attempts to shellify the
- invalid content.
- """
- invalid_config = {
- 'bootcmd': ['ls /', 20, ['wget', 'http://stuff/blah'], {'a': 'n'}]}
- cc = self._get_cloud('ubuntu')
- with self.assertRaises(TypeError) as context_manager:
- handle('cc_bootcmd', invalid_config, cc, LOG, [])
- expected_warnings = [
- 'bootcmd.1: 20 is not valid under any of the given schemas',
- 'bootcmd.3: {\'a\': \'n\'} is not valid under any of the given'
- ' schema'
- ]
- logs = self.logs.getvalue()
- for warning in expected_warnings:
- self.assertIn(warning, logs)
- self.assertIn('Failed to shellify', logs)
- self.assertEqual(
- ("Unable to shellify type 'int'. Expected list, string, tuple. "
- "Got: 20"),
- str(context_manager.exception))
-
- def test_handler_creates_and_runs_bootcmd_script_with_instance_id(self):
- """Valid schema runs a bootcmd script with INSTANCE_ID in the env."""
- cc = self._get_cloud('ubuntu')
- out_file = self.tmp_path('bootcmd.out', self.new_root)
- my_id = "b6ea0f59-e27d-49c6-9f87-79f19765a425"
- valid_config = {'bootcmd': [
- 'echo {0} $INSTANCE_ID > {1}'.format(my_id, out_file)]}
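- # the handler exports INSTANCE_ID into the script environment;
- # DataSourceNone reports 'iid-datasource-none' as the instance id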
-
- with mock.patch(self._etmpfile_path, FakeExtendedTempFile):
- with self.allow_subp(['/bin/sh']):
- handle('cc_bootcmd', valid_config, cc, LOG, [])
- self.assertEqual(my_id + ' iid-datasource-none\n',
- util.load_file(out_file))
-
- def test_handler_runs_bootcmd_script_with_error(self):
- """When a valid script generates an error, that error is raised."""
- cc = self._get_cloud('ubuntu')
- valid_config = {'bootcmd': ['exit 1']} # Script with error
-
- with mock.patch(self._etmpfile_path, FakeExtendedTempFile):
- with self.allow_subp(['/bin/sh']):
- with self.assertRaises(subp.ProcessExecutionError) as ctxt:
- handle('does-not-matter', valid_config, cc, LOG, [])
- self.assertIn(
- 'Unexpected error while running command.\n'
- "Command: ['/bin/sh',",
- str(ctxt.exception))
- self.assertIn(
- 'Failed to run bootcmd module does-not-matter',
- self.logs.getvalue())
-
-
-@skipUnlessJsonSchema()
-class TestSchema(CiTestCase, SchemaTestCaseMixin):
- """Directly test schema rather than through handle."""
-
- schema = schema
-
- def test_duplicates_are_fine_array_array(self):
- """Duplicated commands array/array entries are allowed."""
- self.assertSchemaValid(
- ["byebye", "byebye"], 'command entries can be duplicate')
-
- def test_duplicates_are_fine_array_string(self):
- """Duplicated commands array/string entries are allowed."""
- self.assertSchemaValid(
- ["echo bye", "echo bye"], "command entries can be duplicate.")
-
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_ca_certs.py b/tests/unittests/test_handler/test_handler_ca_certs.py
deleted file mode 100644
index e74a0a08..00000000
--- a/tests/unittests/test_handler/test_handler_ca_certs.py
+++ /dev/null
@@ -1,298 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit import cloud
-from cloudinit import distros
-from cloudinit.config import cc_ca_certs
-from cloudinit import helpers
-from cloudinit import subp
-from cloudinit import util
-
-from cloudinit.tests.helpers import TestCase
-
-import logging
-import shutil
-import tempfile
-import unittest
-from contextlib import ExitStack
-from unittest import mock
-
-
-class TestNoConfig(unittest.TestCase):
- def setUp(self):
- super(TestNoConfig, self).setUp()
- self.name = "ca-certs"
- self.cloud_init = None
- self.log = logging.getLogger("TestNoConfig")
- self.args = []
-
- def test_no_config(self):
- """
- Test that nothing is done if no ca-certs configuration is provided.
- """
- config = util.get_builtin_cfg()
- with ExitStack() as mocks:
- util_mock = mocks.enter_context(
- mock.patch.object(util, 'write_file'))
- certs_mock = mocks.enter_context(
- mock.patch.object(cc_ca_certs, 'update_ca_certs'))
-
- cc_ca_certs.handle(self.name, config, self.cloud_init, self.log,
- self.args)
-
- self.assertEqual(util_mock.call_count, 0)
- self.assertEqual(certs_mock.call_count, 0)
-
-
-class TestConfig(TestCase):
- def setUp(self):
- super(TestConfig, self).setUp()
- self.name = "ca-certs"
- distro = self._fetch_distro('ubuntu')
- self.paths = None
- self.cloud = cloud.Cloud(None, self.paths, None, distro, None)
- self.log = logging.getLogger("TestNoConfig")
- self.args = []
-
- self.mocks = ExitStack()
- self.addCleanup(self.mocks.close)
-
- # Mock out the functions that actually modify the system
- self.mock_add = self.mocks.enter_context(
- mock.patch.object(cc_ca_certs, 'add_ca_certs'))
- self.mock_update = self.mocks.enter_context(
- mock.patch.object(cc_ca_certs, 'update_ca_certs'))
- self.mock_remove = self.mocks.enter_context(
- mock.patch.object(cc_ca_certs, 'remove_default_ca_certs'))
-
- def _fetch_distro(self, kind):
- cls = distros.fetch(kind)
- paths = helpers.Paths({})
- return cls(kind, {}, paths)
-
- def test_no_trusted_list(self):
- """
- Test that no certificates are written if the 'trusted' key is not
- present.
- """
- config = {"ca-certs": {}}
-
- cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
-
- self.assertEqual(self.mock_add.call_count, 0)
- self.assertEqual(self.mock_update.call_count, 1)
- self.assertEqual(self.mock_remove.call_count, 0)
-
- def test_empty_trusted_list(self):
- """Test that no certificate are written if 'trusted' list is empty."""
- config = {"ca-certs": {"trusted": []}}
-
- cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
-
- self.assertEqual(self.mock_add.call_count, 0)
- self.assertEqual(self.mock_update.call_count, 1)
- self.assertEqual(self.mock_remove.call_count, 0)
-
- def test_single_trusted(self):
- """Test that a single cert gets passed to add_ca_certs."""
- config = {"ca-certs": {"trusted": ["CERT1"]}}
-
- cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
-
- self.mock_add.assert_called_once_with(['CERT1'])
- self.assertEqual(self.mock_update.call_count, 1)
- self.assertEqual(self.mock_remove.call_count, 0)
-
- def test_multiple_trusted(self):
- """Test that multiple certs get passed to add_ca_certs."""
- config = {"ca-certs": {"trusted": ["CERT1", "CERT2"]}}
-
- cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
-
- self.mock_add.assert_called_once_with(['CERT1', 'CERT2'])
- self.assertEqual(self.mock_update.call_count, 1)
- self.assertEqual(self.mock_remove.call_count, 0)
-
- def test_remove_default_ca_certs(self):
- """Test remove_defaults works as expected."""
- config = {"ca-certs": {"remove-defaults": True}}
-
- cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
-
- self.assertEqual(self.mock_add.call_count, 0)
- self.assertEqual(self.mock_update.call_count, 1)
- self.assertEqual(self.mock_remove.call_count, 1)
-
- def test_no_remove_defaults_if_false(self):
- """Test remove_defaults is not called when config value is False."""
- config = {"ca-certs": {"remove-defaults": False}}
-
- cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
-
- self.assertEqual(self.mock_add.call_count, 0)
- self.assertEqual(self.mock_update.call_count, 1)
- self.assertEqual(self.mock_remove.call_count, 0)
-
- def test_correct_order_for_remove_then_add(self):
- """Test remove_defaults is not called when config value is False."""
- config = {"ca-certs": {"remove-defaults": True, "trusted": ["CERT1"]}}
-
- cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
-
- self.mock_add.assert_called_once_with(['CERT1'])
- self.assertEqual(self.mock_update.call_count, 1)
- self.assertEqual(self.mock_remove.call_count, 1)
-
-
-class TestAddCaCerts(TestCase):
-
- def setUp(self):
- super(TestAddCaCerts, self).setUp()
- tmpdir = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, tmpdir)
- self.paths = helpers.Paths({
- 'cloud_dir': tmpdir,
- })
-
- def test_no_certs_in_list(self):
- """Test that no certificate are written if not provided."""
- with mock.patch.object(util, 'write_file') as mockobj:
- cc_ca_certs.add_ca_certs([])
- self.assertEqual(mockobj.call_count, 0)
-
- def test_single_cert_trailing_cr(self):
- """Test adding a single certificate to the trusted CAs
- when the existing ca-certificates.conf has a trailing newline"""
- cert = "CERT1\nLINE2\nLINE3"
-
- ca_certs_content = "line1\nline2\ncloud-init-ca-certs.crt\nline3\n"
- expected = "line1\nline2\nline3\ncloud-init-ca-certs.crt\n"
-
- with ExitStack() as mocks:
- mock_write = mocks.enter_context(
- mock.patch.object(util, 'write_file'))
- mock_load = mocks.enter_context(
- mock.patch.object(util, 'load_file',
- return_value=ca_certs_content))
-
- cc_ca_certs.add_ca_certs([cert])
-
- mock_write.assert_has_calls([
- mock.call("/usr/share/ca-certificates/cloud-init-ca-certs.crt",
- cert, mode=0o644),
- mock.call("/etc/ca-certificates.conf", expected, omode="wb")])
- mock_load.assert_called_once_with("/etc/ca-certificates.conf")
-
- def test_single_cert_no_trailing_cr(self):
- """Test adding a single certificate to the trusted CAs
- when the existing ca-certificates.conf has no trailing newline"""
- cert = "CERT1\nLINE2\nLINE3"
-
- ca_certs_content = "line1\nline2\nline3"
-
- with ExitStack() as mocks:
- mock_write = mocks.enter_context(
- mock.patch.object(util, 'write_file'))
- mock_load = mocks.enter_context(
- mock.patch.object(util, 'load_file',
- return_value=ca_certs_content))
-
- cc_ca_certs.add_ca_certs([cert])
-
- mock_write.assert_has_calls([
- mock.call("/usr/share/ca-certificates/cloud-init-ca-certs.crt",
- cert, mode=0o644),
- mock.call("/etc/ca-certificates.conf",
- "%s\n%s\n" % (ca_certs_content,
- "cloud-init-ca-certs.crt"),
- omode="wb")])
-
- mock_load.assert_called_once_with("/etc/ca-certificates.conf")
-
- def test_single_cert_to_empty_existing_ca_file(self):
- """Test adding a single certificate to the trusted CAs
- when the existing ca-certificates.conf is empty"""
- cert = "CERT1\nLINE2\nLINE3"
-
- expected = "cloud-init-ca-certs.crt\n"
-
- with ExitStack() as mocks:
- mock_write = mocks.enter_context(
- mock.patch.object(util, 'write_file', autospec=True))
- mock_stat = mocks.enter_context(
- mock.patch("cloudinit.config.cc_ca_certs.os.stat")
- )
- mock_stat.return_value.st_size = 0
-
- cc_ca_certs.add_ca_certs([cert])
-
- mock_write.assert_has_calls([
- mock.call("/usr/share/ca-certificates/cloud-init-ca-certs.crt",
- cert, mode=0o644),
- mock.call("/etc/ca-certificates.conf", expected, omode="wb")])
-
- def test_multiple_certs(self):
- """Test adding multiple certificates to the trusted CAs."""
- certs = ["CERT1\nLINE2\nLINE3", "CERT2\nLINE2\nLINE3"]
- expected_cert_file = "\n".join(certs)
- ca_certs_content = "line1\nline2\nline3"
-
- with ExitStack() as mocks:
- mock_write = mocks.enter_context(
- mock.patch.object(util, 'write_file'))
- mock_load = mocks.enter_context(
- mock.patch.object(util, 'load_file',
- return_value=ca_certs_content))
-
- cc_ca_certs.add_ca_certs(certs)
-
- mock_write.assert_has_calls([
- mock.call("/usr/share/ca-certificates/cloud-init-ca-certs.crt",
- expected_cert_file, mode=0o644),
- mock.call("/etc/ca-certificates.conf",
- "%s\n%s\n" % (ca_certs_content,
- "cloud-init-ca-certs.crt"),
- omode='wb')])
-
- mock_load.assert_called_once_with("/etc/ca-certificates.conf")
-
-
-class TestUpdateCaCerts(unittest.TestCase):
- def test_commands(self):
- with mock.patch.object(subp, 'subp') as mockobj:
- cc_ca_certs.update_ca_certs()
- mockobj.assert_called_once_with(
- ["update-ca-certificates"], capture=False)
-
-
-class TestRemoveDefaultCaCerts(TestCase):
-
- def setUp(self):
- super(TestRemoveDefaultCaCerts, self).setUp()
- tmpdir = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, tmpdir)
- self.paths = helpers.Paths({
- 'cloud_dir': tmpdir,
- })
-
- def test_commands(self):
- with ExitStack() as mocks:
- mock_delete = mocks.enter_context(
- mock.patch.object(util, 'delete_dir_contents'))
- mock_write = mocks.enter_context(
- mock.patch.object(util, 'write_file'))
- mock_subp = mocks.enter_context(mock.patch.object(subp, 'subp'))
-
- cc_ca_certs.remove_default_ca_certs('ubuntu')
-
- mock_delete.assert_has_calls([
- mock.call("/usr/share/ca-certificates/"),
- mock.call("/etc/ssl/certs/")])
-
- mock_write.assert_called_once_with(
- "/etc/ca-certificates.conf", "", mode=0o644)
-
- mock_subp.assert_called_once_with(
- ('debconf-set-selections', '-'),
- "ca-certificates ca-certificates/trust_new_crts select no")
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_chef.py b/tests/unittests/test_handler/test_handler_chef.py
deleted file mode 100644
index 7918c609..00000000
--- a/tests/unittests/test_handler/test_handler_chef.py
+++ /dev/null
@@ -1,280 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import httpretty
-import json
-import logging
-import os
-
-from cloudinit import cloud
-from cloudinit.config import cc_chef
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit.sources import DataSourceNone
-from cloudinit import util
-
-from cloudinit.tests.helpers import (
- HttprettyTestCase, FilesystemMockingTestCase, mock, skipIf)
-
-LOG = logging.getLogger(__name__)
-
-CLIENT_TEMPL = os.path.sep.join(["templates", "chef_client.rb.tmpl"])
-
-# This is adjusted to use http because using https causes issues
-# in some openssl/httpretty combinations.
-# https://github.com/gabrielfalcao/HTTPretty/issues/242
-# We saw an issue in openSUSE 42.3 with
-# httpretty=0.8.8-7.1 ndg-httpsclient=0.4.0-3.2 pyOpenSSL=16.0.0-4.1
-OMNIBUS_URL_HTTP = cc_chef.OMNIBUS_URL.replace("https:", "http:")
-
-
-class TestInstallChefOmnibus(HttprettyTestCase):
-
- def setUp(self):
- super(TestInstallChefOmnibus, self).setUp()
- self.new_root = self.tmp_dir()
-
- @mock.patch("cloudinit.config.cc_chef.OMNIBUS_URL", OMNIBUS_URL_HTTP)
- def test_install_chef_from_omnibus_runs_chef_url_content(self):
- """install_chef_from_omnibus calls subp_blob_in_tempfile."""
- response = b'#!/bin/bash\necho "Hi Mom"'
- httpretty.register_uri(
- httpretty.GET, cc_chef.OMNIBUS_URL, body=response, status=200)
- ret = (None, None) # stdout, stderr but capture=False
-
- with mock.patch("cloudinit.config.cc_chef.subp_blob_in_tempfile",
- return_value=ret) as m_subp_blob:
- cc_chef.install_chef_from_omnibus()
- # admittedly whitebox, but assuming subp_blob_in_tempfile works
- # this should be fine.
- self.assertEqual(
- [mock.call(blob=response, args=[], basename='chef-omnibus-install',
- capture=False)],
- m_subp_blob.call_args_list)
-
- @mock.patch('cloudinit.config.cc_chef.url_helper.readurl')
- @mock.patch('cloudinit.config.cc_chef.subp_blob_in_tempfile')
- def test_install_chef_from_omnibus_retries_url(self, m_subp_blob, m_rdurl):
- """install_chef_from_omnibus retries OMNIBUS_URL upon failure."""
-
- class FakeURLResponse(object):
- contents = '#!/bin/bash\necho "Hi Mom" > {0}/chef.out'.format(
- self.new_root)
-
- m_rdurl.return_value = FakeURLResponse()
-
- cc_chef.install_chef_from_omnibus()
- expected_kwargs = {'retries': cc_chef.OMNIBUS_URL_RETRIES,
- 'url': cc_chef.OMNIBUS_URL}
- self.assertCountEqual(expected_kwargs, m_rdurl.call_args_list[0][1])
- cc_chef.install_chef_from_omnibus(retries=10)
- expected_kwargs = {'retries': 10,
- 'url': cc_chef.OMNIBUS_URL}
- self.assertCountEqual(expected_kwargs, m_rdurl.call_args_list[1][1])
- expected_subp_kwargs = {
- 'args': ['-v', '2.0'],
- 'basename': 'chef-omnibus-install',
- 'blob': m_rdurl.return_value.contents,
- 'capture': False
- }
- self.assertCountEqual(
- expected_subp_kwargs,
- m_subp_blob.call_args_list[0][1])
-
- @mock.patch("cloudinit.config.cc_chef.OMNIBUS_URL", OMNIBUS_URL_HTTP)
- @mock.patch('cloudinit.config.cc_chef.subp_blob_in_tempfile')
- def test_install_chef_from_omnibus_has_omnibus_version(self, m_subp_blob):
- """install_chef_from_omnibus provides version arg to OMNIBUS_URL."""
- chef_outfile = self.tmp_path('chef.out', self.new_root)
- response = '#!/bin/bash\necho "Hi Mom" > {0}'.format(chef_outfile)
- httpretty.register_uri(
- httpretty.GET, cc_chef.OMNIBUS_URL, body=response)
- cc_chef.install_chef_from_omnibus(omnibus_version='2.0')
-
- called_kwargs = m_subp_blob.call_args_list[0][1]
- expected_kwargs = {
- 'args': ['-v', '2.0'],
- 'basename': 'chef-omnibus-install',
- 'blob': response,
- 'capture': False
- }
- self.assertCountEqual(expected_kwargs, called_kwargs)
-
-
-class TestChef(FilesystemMockingTestCase):
-
- def setUp(self):
- super(TestChef, self).setUp()
- self.tmp = self.tmp_dir()
-
- def fetch_cloud(self, distro_kind):
- cls = distros.fetch(distro_kind)
- paths = helpers.Paths({})
- distro = cls(distro_kind, {}, paths)
- ds = DataSourceNone.DataSourceNone({}, distro, paths, None)
- return cloud.Cloud(ds, paths, {}, distro, None)
-
- def test_no_config(self):
- self.patchUtils(self.tmp)
- self.patchOS(self.tmp)
-
- cfg = {}
- cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, [])
- for d in cc_chef.CHEF_DIRS:
- self.assertFalse(os.path.isdir(d))
-
- @skipIf(not os.path.isfile(CLIENT_TEMPL),
- CLIENT_TEMPL + " is not available")
- def test_basic_config(self):
- """
- test basic config looks sane
-
- # This should create a file of the format...
- # Created by cloud-init v. 0.7.6 on Sat, 11 Oct 2014 23:57:21 +0000
- chef_license "accept"
- log_level :info
- ssl_verify_mode :verify_none
- log_location "/var/log/chef/client.log"
- validation_client_name "bob"
- validation_key "/etc/chef/validation.pem"
- client_key "/etc/chef/client.pem"
- chef_server_url "localhost"
- environment "_default"
- node_name "iid-datasource-none"
- json_attribs "/etc/chef/firstboot.json"
- file_cache_path "/var/cache/chef"
- file_backup_path "/var/backups/chef"
- pid_file "/var/run/chef/client.pid"
- Chef::Log::Formatter.show_time = true
- encrypted_data_bag_secret "/etc/chef/encrypted_data_bag_secret"
- """
- tpl_file = util.load_file('templates/chef_client.rb.tmpl')
- self.patchUtils(self.tmp)
- self.patchOS(self.tmp)
-
- util.write_file('/etc/cloud/templates/chef_client.rb.tmpl', tpl_file)
- cfg = {
- 'chef': {
- 'chef_license': "accept",
- 'server_url': 'localhost',
- 'validation_name': 'bob',
- 'validation_key': "/etc/chef/vkey.pem",
- 'validation_cert': "this is my cert",
- 'encrypted_data_bag_secret':
- '/etc/chef/encrypted_data_bag_secret'
- },
- }
- cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, [])
- for d in cc_chef.CHEF_DIRS:
- self.assertTrue(os.path.isdir(d))
- c = util.load_file(cc_chef.CHEF_RB_PATH)
-
- # the content of these keys is not expected to be rendered to tmpl
- unrendered_keys = ('validation_cert',)
- for k, v in cfg['chef'].items():
- if k in unrendered_keys:
- continue
- self.assertIn(v, c)
- for k, v in cc_chef.CHEF_RB_TPL_DEFAULTS.items():
- if k in unrendered_keys:
- continue
- # the value from the cfg overrides that in the default
- val = cfg['chef'].get(k, v)
- if isinstance(val, str):
- self.assertIn(val, c)
- c = util.load_file(cc_chef.CHEF_FB_PATH)
- self.assertEqual({}, json.loads(c))
-
- def test_firstboot_json(self):
- self.patchUtils(self.tmp)
- self.patchOS(self.tmp)
-
- cfg = {
- 'chef': {
- 'server_url': 'localhost',
- 'validation_name': 'bob',
- 'run_list': ['a', 'b', 'c'],
- 'initial_attributes': {
- 'c': 'd',
- }
- },
- }
- cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, [])
- c = util.load_file(cc_chef.CHEF_FB_PATH)
- self.assertEqual(
- {
- 'run_list': ['a', 'b', 'c'],
- 'c': 'd',
- }, json.loads(c))
-
- @skipIf(not os.path.isfile(CLIENT_TEMPL),
- CLIENT_TEMPL + " is not available")
- def test_template_deletes(self):
- tpl_file = util.load_file('templates/chef_client.rb.tmpl')
- self.patchUtils(self.tmp)
- self.patchOS(self.tmp)
-
- util.write_file('/etc/cloud/templates/chef_client.rb.tmpl', tpl_file)
- cfg = {
- 'chef': {
- 'server_url': 'localhost',
- 'validation_name': 'bob',
- 'json_attribs': None,
- 'show_time': None,
- },
- }
- cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, [])
- c = util.load_file(cc_chef.CHEF_RB_PATH)
- self.assertNotIn('json_attribs', c)
- self.assertNotIn('Formatter.show_time', c)
-
- @skipIf(not os.path.isfile(CLIENT_TEMPL),
- CLIENT_TEMPL + " is not available")
- def test_validation_cert_and_validation_key(self):
- # test validation_cert content is written to validation_key path
- tpl_file = util.load_file('templates/chef_client.rb.tmpl')
- self.patchUtils(self.tmp)
- self.patchOS(self.tmp)
-
- util.write_file('/etc/cloud/templates/chef_client.rb.tmpl', tpl_file)
- v_path = '/etc/chef/vkey.pem'
- v_cert = 'this is my cert'
- cfg = {
- 'chef': {
- 'server_url': 'localhost',
- 'validation_name': 'bob',
- 'validation_key': v_path,
- 'validation_cert': v_cert
- },
- }
- cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, [])
- content = util.load_file(cc_chef.CHEF_RB_PATH)
- self.assertIn(v_path, content)
- util.load_file(v_path)
- self.assertEqual(v_cert, util.load_file(v_path))
-
- def test_validation_cert_with_system(self):
- # test validation_cert content is not written over system file
- tpl_file = util.load_file('templates/chef_client.rb.tmpl')
- self.patchUtils(self.tmp)
- self.patchOS(self.tmp)
-
- v_path = '/etc/chef/vkey.pem'
- v_cert = "system"
- expected_cert = "this is the system file certificate"
- cfg = {
- 'chef': {
- 'server_url': 'localhost',
- 'validation_name': 'bob',
- 'validation_key': v_path,
- 'validation_cert': v_cert
- },
- }
- util.write_file('/etc/cloud/templates/chef_client.rb.tmpl', tpl_file)
- util.write_file(v_path, expected_cert)
- cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, [])
- content = util.load_file(cc_chef.CHEF_RB_PATH)
- self.assertIn(v_path, content)
- util.load_file(v_path)
- self.assertEqual(expected_cert, util.load_file(v_path))
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_debug.py b/tests/unittests/test_handler/test_handler_debug.py
deleted file mode 100644
index 787ba350..00000000
--- a/tests/unittests/test_handler/test_handler_debug.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# Copyright (C) 2014 Yahoo! Inc.
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.config import cc_debug
-
-from cloudinit import cloud
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import util
-
-from cloudinit.sources import DataSourceNone
-
-from cloudinit.tests.helpers import (FilesystemMockingTestCase, mock)
-
-import logging
-import shutil
-import tempfile
-
-LOG = logging.getLogger(__name__)
-
-
-@mock.patch('cloudinit.distros.debian.read_system_locale')
-class TestDebug(FilesystemMockingTestCase):
- def setUp(self):
- super(TestDebug, self).setUp()
- self.new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.new_root)
-
- def _get_cloud(self, distro, metadata=None):
- self.patchUtils(self.new_root)
- paths = helpers.Paths({})
- cls = distros.fetch(distro)
- d = cls(distro, {}, paths)
- ds = DataSourceNone.DataSourceNone({}, d, paths)
- if metadata:
- ds.metadata.update(metadata)
- return cloud.Cloud(ds, paths, {}, d, None)
-
- def test_debug_write(self, m_locale):
- m_locale.return_value = 'en_US.UTF-8'
- cfg = {
- 'abc': '123',
- 'c': u'\u20a0',
- 'debug': {
- 'verbose': True,
- # Does not actually write here due to mocking...
- 'output': '/var/log/cloud-init-debug.log',
- },
- }
- cc = self._get_cloud('ubuntu')
- cc_debug.handle('cc_debug', cfg, cc, LOG, [])
- contents = util.load_file('/var/log/cloud-init-debug.log')
- # Some basic sanity tests...
- self.assertNotEqual(0, len(contents))
- for k in cfg.keys():
- self.assertIn(k, contents)
-
- def test_debug_no_write(self, m_locale):
- m_locale.return_value = 'en_US.UTF-8'
- cfg = {
- 'abc': '123',
- 'debug': {
- 'verbose': False,
- # Does not actually write here due to mocking...
- 'output': '/var/log/cloud-init-debug.log',
- },
- }
- cc = self._get_cloud('ubuntu')
- cc_debug.handle('cc_debug', cfg, cc, LOG, [])
- self.assertRaises(IOError,
- util.load_file, '/var/log/cloud-init-debug.log')
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_disk_setup.py b/tests/unittests/test_handler/test_handler_disk_setup.py
deleted file mode 100644
index 4f4a57fa..00000000
--- a/tests/unittests/test_handler/test_handler_disk_setup.py
+++ /dev/null
@@ -1,243 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import random
-
-from cloudinit.config import cc_disk_setup
-from cloudinit.tests.helpers import CiTestCase, ExitStack, mock, TestCase
-
-
-class TestIsDiskUsed(TestCase):
-
- def setUp(self):
- super(TestIsDiskUsed, self).setUp()
- self.patches = ExitStack()
- mod_name = 'cloudinit.config.cc_disk_setup'
- self.enumerate_disk = self.patches.enter_context(
- mock.patch('{0}.enumerate_disk'.format(mod_name)))
- self.check_fs = self.patches.enter_context(
- mock.patch('{0}.check_fs'.format(mod_name)))
-
- def tearDown(self):
- super(TestIsDiskUsed, self).tearDown()
- self.patches.close()
-
- def test_multiple_child_nodes_returns_true(self):
- self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(2))
- self.check_fs.return_value = (mock.MagicMock(), None, mock.MagicMock())
- self.assertTrue(cc_disk_setup.is_disk_used(mock.MagicMock()))
-
- def test_valid_filesystem_returns_true(self):
- self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(1))
- self.check_fs.return_value = (
- mock.MagicMock(), 'ext4', mock.MagicMock())
- self.assertTrue(cc_disk_setup.is_disk_used(mock.MagicMock()))
-
- def test_one_child_nodes_and_no_fs_returns_false(self):
- self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(1))
- self.check_fs.return_value = (mock.MagicMock(), None, mock.MagicMock())
- self.assertFalse(cc_disk_setup.is_disk_used(mock.MagicMock()))
-
-
-class TestGetMbrHddSize(TestCase):
-
- def setUp(self):
- super(TestGetMbrHddSize, self).setUp()
- self.patches = ExitStack()
- self.subp = self.patches.enter_context(
- mock.patch.object(cc_disk_setup.subp, 'subp'))
-
- def tearDown(self):
- super(TestGetMbrHddSize, self).tearDown()
- self.patches.close()
-
- def _configure_subp_mock(self, hdd_size_in_bytes, sector_size_in_bytes):
- def _subp(cmd, *args, **kwargs):
- self.assertEqual(3, len(cmd))
- if '--getsize64' in cmd:
- return hdd_size_in_bytes, None
- elif '--getss' in cmd:
- return sector_size_in_bytes, None
- raise Exception('Unexpected blockdev command called')
-
- self.subp.side_effect = _subp
-
- def _test_for_sector_size(self, sector_size):
- size_in_bytes = random.randint(10000, 10000000) * 512
- size_in_sectors = size_in_bytes / sector_size
- self._configure_subp_mock(size_in_bytes, sector_size)
- self.assertEqual(size_in_sectors,
- cc_disk_setup.get_hdd_size('/dev/sda1'))
-
- def test_size_for_512_byte_sectors(self):
- self._test_for_sector_size(512)
-
- def test_size_for_1024_byte_sectors(self):
- self._test_for_sector_size(1024)
-
- def test_size_for_2048_byte_sectors(self):
- self._test_for_sector_size(2048)
-
- def test_size_for_4096_byte_sectors(self):
- self._test_for_sector_size(4096)
-
-
-class TestGetPartitionMbrLayout(TestCase):
-
- def test_single_partition_using_boolean(self):
- self.assertEqual('0,',
- cc_disk_setup.get_partition_mbr_layout(1000, True))
-
- def test_single_partition_using_list(self):
- disk_size = random.randint(1000000, 1000000000000)
- self.assertEqual(
- ',,83',
- cc_disk_setup.get_partition_mbr_layout(disk_size, [100]))
-
- def test_half_and_half(self):
- disk_size = random.randint(1000000, 1000000000000)
- expected_partition_size = int(float(disk_size) / 2)
- self.assertEqual(
- ',{0},83\n,,83'.format(expected_partition_size),
- cc_disk_setup.get_partition_mbr_layout(disk_size, [50, 50]))
-
- def test_thirds_with_different_partition_type(self):
- disk_size = random.randint(1000000, 1000000000000)
- expected_partition_size = int(float(disk_size) * 0.33)
- self.assertEqual(
- ',{0},83\n,,82'.format(expected_partition_size),
- cc_disk_setup.get_partition_mbr_layout(disk_size, [33, [66, 82]]))
-
-
-class TestUpdateFsSetupDevices(TestCase):
- def test_regression_1634678(self):
- # Cf. https://bugs.launchpad.net/cloud-init/+bug/1634678
- fs_setup = {
- 'partition': 'auto',
- 'device': '/dev/xvdb1',
- 'overwrite': False,
- 'label': 'test',
- 'filesystem': 'ext4'
- }
-
- cc_disk_setup.update_fs_setup_devices([fs_setup],
- lambda device: device)
-
- self.assertEqual({
- '_origname': '/dev/xvdb1',
- 'partition': 'auto',
- 'device': '/dev/xvdb1',
- 'overwrite': False,
- 'label': 'test',
- 'filesystem': 'ext4'
- }, fs_setup)
-
- def test_dotted_devname(self):
- fs_setup = {
- 'partition': 'auto',
- 'device': 'ephemeral0.0',
- 'label': 'test2',
- 'filesystem': 'xfs'
- }
-
- cc_disk_setup.update_fs_setup_devices([fs_setup],
- lambda device: device)
-
- self.assertEqual({
- '_origname': 'ephemeral0.0',
- '_partition': 'auto',
- 'partition': '0',
- 'device': 'ephemeral0',
- 'label': 'test2',
- 'filesystem': 'xfs'
- }, fs_setup)
-
- def test_dotted_devname_populates_partition(self):
- fs_setup = {
- 'device': 'ephemeral0.1',
- 'label': 'test2',
- 'filesystem': 'xfs'
- }
- cc_disk_setup.update_fs_setup_devices([fs_setup],
- lambda device: device)
- self.assertEqual({
- '_origname': 'ephemeral0.1',
- 'device': 'ephemeral0',
- 'partition': '1',
- 'label': 'test2',
- 'filesystem': 'xfs'
- }, fs_setup)
-
-
-@mock.patch('cloudinit.config.cc_disk_setup.assert_and_settle_device',
- return_value=None)
-@mock.patch('cloudinit.config.cc_disk_setup.find_device_node',
- return_value=('/dev/xdb1', False))
-@mock.patch('cloudinit.config.cc_disk_setup.device_type', return_value=None)
-@mock.patch('cloudinit.config.cc_disk_setup.subp.subp', return_value=('', ''))
-class TestMkfsCommandHandling(CiTestCase):
-
- with_logs = True
-
- def test_with_cmd(self, subp, *args):
- """mkfs honors cmd and logs warnings when extra_opts or overwrite are
- provided."""
- cc_disk_setup.mkfs({
- 'cmd': 'mkfs -t %(filesystem)s -L %(label)s %(device)s',
- 'filesystem': 'ext4',
- 'device': '/dev/xdb1',
- 'label': 'with_cmd',
- 'extra_opts': ['should', 'generate', 'warning'],
- 'overwrite': 'should generate warning too'
- })
-
- self.assertIn(
- 'extra_opts ' +
- 'ignored because cmd was specified: mkfs -t ext4 -L with_cmd ' +
- '/dev/xdb1',
- self.logs.getvalue())
- self.assertIn(
- 'overwrite ' +
- 'ignored because cmd was specified: mkfs -t ext4 -L with_cmd ' +
- '/dev/xdb1',
- self.logs.getvalue())
-
- subp.assert_called_once_with(
- 'mkfs -t ext4 -L with_cmd /dev/xdb1', shell=True)
-
- @mock.patch('cloudinit.config.cc_disk_setup.subp.which')
- def test_overwrite_and_extra_opts_without_cmd(self, m_which, subp, *args):
- """mkfs observes extra_opts and overwrite settings when cmd is not
- present."""
- m_which.side_effect = lambda p: {'mkfs.ext4': '/sbin/mkfs.ext4'}[p]
- cc_disk_setup.mkfs({
- 'filesystem': 'ext4',
- 'device': '/dev/xdb1',
- 'label': 'without_cmd',
- 'extra_opts': ['are', 'added'],
- 'overwrite': True
- })
-
- subp.assert_called_once_with(
- ['/sbin/mkfs.ext4', '/dev/xdb1',
- '-L', 'without_cmd', '-F', 'are', 'added'],
- shell=False)
-
- @mock.patch('cloudinit.config.cc_disk_setup.subp.which')
- def test_mkswap(self, m_which, subp, *args):
- """mkfs observes extra_opts and overwrite settings when cmd is not
- present."""
- m_which.side_effect = iter([None, '/sbin/mkswap'])
- cc_disk_setup.mkfs({
- 'filesystem': 'swap',
- 'device': '/dev/xdb1',
- 'label': 'swap',
- 'overwrite': True,
- })
-
- self.assertEqual([mock.call('mkfs.swap'), mock.call('mkswap')],
- m_which.call_args_list)
- subp.assert_called_once_with(
- ['/sbin/mkswap', '/dev/xdb1', '-L', 'swap', '-f'], shell=False)
-
-#
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_etc_hosts.py b/tests/unittests/test_handler/test_handler_etc_hosts.py
deleted file mode 100644
index e3778b11..00000000
--- a/tests/unittests/test_handler/test_handler_etc_hosts.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.config import cc_update_etc_hosts
-
-from cloudinit import cloud
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import util
-
-from cloudinit.tests import helpers as t_help
-
-import logging
-import os
-import shutil
-
-LOG = logging.getLogger(__name__)
-
-
-class TestHostsFile(t_help.FilesystemMockingTestCase):
- def setUp(self):
- super(TestHostsFile, self).setUp()
- self.tmp = self.tmp_dir()
-
- def _fetch_distro(self, kind):
- cls = distros.fetch(kind)
- paths = helpers.Paths({})
- return cls(kind, {}, paths)
-
- def test_write_etc_hosts_suse_localhost(self):
- cfg = {
- 'manage_etc_hosts': 'localhost',
- 'hostname': 'cloud-init.test.us'
- }
- os.makedirs('%s/etc/' % self.tmp)
- hosts_content = '192.168.1.1 blah.blah.us blah\n'
- fout = open('%s/etc/hosts' % self.tmp, 'w')
- fout.write(hosts_content)
- fout.close()
- distro = self._fetch_distro('sles')
- distro.hosts_fn = '%s/etc/hosts' % self.tmp
- paths = helpers.Paths({})
- ds = None
- cc = cloud.Cloud(ds, paths, {}, distro, None)
- self.patchUtils(self.tmp)
- cc_update_etc_hosts.handle('test', cfg, cc, LOG, [])
- contents = util.load_file('%s/etc/hosts' % self.tmp)
- if '127.0.1.1\tcloud-init.test.us\tcloud-init' not in contents:
- self.assertIsNone('No entry for 127.0.1.1 in etc/hosts')
- if '192.168.1.1\tblah.blah.us\tblah' not in contents:
- self.assertIsNone('Default etc/hosts content modified')
-
- @t_help.skipUnlessJinja()
- def test_write_etc_hosts_suse_template(self):
- cfg = {
- 'manage_etc_hosts': 'template',
- 'hostname': 'cloud-init.test.us'
- }
- shutil.copytree('templates', '%s/etc/cloud/templates' % self.tmp)
- distro = self._fetch_distro('sles')
- paths = helpers.Paths({})
- paths.template_tpl = '%s' % self.tmp + '/etc/cloud/templates/%s.tmpl'
- ds = None
- cc = cloud.Cloud(ds, paths, {}, distro, None)
- self.patchUtils(self.tmp)
- cc_update_etc_hosts.handle('test', cfg, cc, LOG, [])
- contents = util.load_file('%s/etc/hosts' % self.tmp)
- if '127.0.1.1 cloud-init.test.us cloud-init' not in contents:
- self.assertIsNone('No entry for 127.0.1.1 in etc/hosts')
- if '::1 cloud-init.test.us cloud-init' not in contents:
-            self.assertIsNone('No entry for ::1 in etc/hosts')
diff --git a/tests/unittests/test_handler/test_handler_landscape.py b/tests/unittests/test_handler/test_handler_landscape.py
deleted file mode 100644
index 7d165687..00000000
--- a/tests/unittests/test_handler/test_handler_landscape.py
+++ /dev/null
@@ -1,130 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.config import cc_landscape
-from cloudinit import (distros, helpers, cloud, util)
-from cloudinit.sources import DataSourceNone
-from cloudinit.tests.helpers import (FilesystemMockingTestCase, mock,
- wrap_and_call)
-
-from configobj import ConfigObj
-import logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-class TestLandscape(FilesystemMockingTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestLandscape, self).setUp()
- self.new_root = self.tmp_dir()
- self.conf = self.tmp_path('client.conf', self.new_root)
- self.default_file = self.tmp_path('default_landscape', self.new_root)
-
- def _get_cloud(self, distro):
- self.patchUtils(self.new_root)
- paths = helpers.Paths({'templates_dir': self.new_root})
- cls = distros.fetch(distro)
- mydist = cls(distro, {}, paths)
- myds = DataSourceNone.DataSourceNone({}, mydist, paths)
- return cloud.Cloud(myds, paths, {}, mydist, None)
-
- def test_handler_skips_empty_landscape_cloudconfig(self):
- """Empty landscape cloud-config section does no work."""
- mycloud = self._get_cloud('ubuntu')
- mycloud.distro = mock.MagicMock()
- cfg = {'landscape': {}}
- cc_landscape.handle('notimportant', cfg, mycloud, LOG, None)
- self.assertFalse(mycloud.distro.install_packages.called)
-
- def test_handler_error_on_invalid_landscape_type(self):
- """Raise an error when landscape configuraiton option is invalid."""
- mycloud = self._get_cloud('ubuntu')
- cfg = {'landscape': 'wrongtype'}
- with self.assertRaises(RuntimeError) as context_manager:
- cc_landscape.handle('notimportant', cfg, mycloud, LOG, None)
- self.assertIn(
- "'landscape' key existed in config, but not a dict",
- str(context_manager.exception))
-
- @mock.patch('cloudinit.config.cc_landscape.subp')
- def test_handler_restarts_landscape_client(self, m_subp):
- """handler restarts lansdscape-client after install."""
- mycloud = self._get_cloud('ubuntu')
- cfg = {'landscape': {'client': {}}}
- wrap_and_call(
- 'cloudinit.config.cc_landscape',
- {'LSC_CLIENT_CFG_FILE': {'new': self.conf}},
- cc_landscape.handle, 'notimportant', cfg, mycloud, LOG, None)
- self.assertEqual(
- [mock.call(['service', 'landscape-client', 'restart'])],
- m_subp.subp.call_args_list)
-
- def test_handler_installs_client_and_creates_config_file(self):
- """Write landscape client.conf and install landscape-client."""
- mycloud = self._get_cloud('ubuntu')
- cfg = {'landscape': {'client': {}}}
- expected = {'client': {
- 'log_level': 'info',
- 'url': 'https://landscape.canonical.com/message-system',
- 'ping_url': 'http://landscape.canonical.com/ping',
- 'data_path': '/var/lib/landscape/client'}}
- mycloud.distro = mock.MagicMock()
- wrap_and_call(
- 'cloudinit.config.cc_landscape',
- {'LSC_CLIENT_CFG_FILE': {'new': self.conf},
- 'LS_DEFAULT_FILE': {'new': self.default_file}},
- cc_landscape.handle, 'notimportant', cfg, mycloud, LOG, None)
- self.assertEqual(
- [mock.call('landscape-client')],
- mycloud.distro.install_packages.call_args)
- self.assertEqual(expected, dict(ConfigObj(self.conf)))
- self.assertIn(
- 'Wrote landscape config file to {0}'.format(self.conf),
- self.logs.getvalue())
- default_content = util.load_file(self.default_file)
- self.assertEqual('RUN=1\n', default_content)
-
- def test_handler_writes_merged_client_config_file_with_defaults(self):
- """Merge and write options from LSC_CLIENT_CFG_FILE with defaults."""
- # Write existing sparse client.conf file
- util.write_file(self.conf, '[client]\ncomputer_title = My PC\n')
- mycloud = self._get_cloud('ubuntu')
- cfg = {'landscape': {'client': {}}}
- expected = {'client': {
- 'log_level': 'info',
- 'url': 'https://landscape.canonical.com/message-system',
- 'ping_url': 'http://landscape.canonical.com/ping',
- 'data_path': '/var/lib/landscape/client',
- 'computer_title': 'My PC'}}
- wrap_and_call(
- 'cloudinit.config.cc_landscape',
- {'LSC_CLIENT_CFG_FILE': {'new': self.conf}},
- cc_landscape.handle, 'notimportant', cfg, mycloud, LOG, None)
- self.assertEqual(expected, dict(ConfigObj(self.conf)))
- self.assertIn(
- 'Wrote landscape config file to {0}'.format(self.conf),
- self.logs.getvalue())
-
- def test_handler_writes_merged_provided_cloudconfig_with_defaults(self):
- """Merge and write options from cloud-config options with defaults."""
- # Write empty sparse client.conf file
- util.write_file(self.conf, '')
- mycloud = self._get_cloud('ubuntu')
- cfg = {'landscape': {'client': {'computer_title': 'My PC'}}}
- expected = {'client': {
- 'log_level': 'info',
- 'url': 'https://landscape.canonical.com/message-system',
- 'ping_url': 'http://landscape.canonical.com/ping',
- 'data_path': '/var/lib/landscape/client',
- 'computer_title': 'My PC'}}
- wrap_and_call(
- 'cloudinit.config.cc_landscape',
- {'LSC_CLIENT_CFG_FILE': {'new': self.conf}},
- cc_landscape.handle, 'notimportant', cfg, mycloud, LOG, None)
- self.assertEqual(expected, dict(ConfigObj(self.conf)))
- self.assertIn(
- 'Wrote landscape config file to {0}'.format(self.conf),
- self.logs.getvalue())
diff --git a/tests/unittests/test_handler/test_handler_locale.py b/tests/unittests/test_handler/test_handler_locale.py
deleted file mode 100644
index 47e7d804..00000000
--- a/tests/unittests/test_handler/test_handler_locale.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.config import cc_locale
-
-from cloudinit import cloud
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import util
-
-from cloudinit.sources import DataSourceNoCloud
-
-from cloudinit.tests import helpers as t_help
-
-from configobj import ConfigObj
-
-import logging
-import os
-import shutil
-import tempfile
-from io import BytesIO
-from unittest import mock
-
-LOG = logging.getLogger(__name__)
-
-
-class TestLocale(t_help.FilesystemMockingTestCase):
-
- def setUp(self):
- super(TestLocale, self).setUp()
- self.new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.new_root)
-
- def _get_cloud(self, distro):
- self.patchUtils(self.new_root)
- paths = helpers.Paths({})
-
- cls = distros.fetch(distro)
- d = cls(distro, {}, paths)
- ds = DataSourceNoCloud.DataSourceNoCloud({}, d, paths)
- cc = cloud.Cloud(ds, paths, {}, d, None)
- return cc
-
- def test_set_locale_sles(self):
-
- cfg = {
- 'locale': 'My.Locale',
- }
- cc = self._get_cloud('sles')
- cc_locale.handle('cc_locale', cfg, cc, LOG, [])
- if cc.distro.uses_systemd():
- locale_conf = cc.distro.systemd_locale_conf_fn
- else:
- locale_conf = cc.distro.locale_conf_fn
- contents = util.load_file(locale_conf, decode=False)
- n_cfg = ConfigObj(BytesIO(contents))
- if cc.distro.uses_systemd():
- self.assertEqual({'LANG': cfg['locale']}, dict(n_cfg))
- else:
- self.assertEqual({'RC_LANG': cfg['locale']}, dict(n_cfg))
-
- def test_set_locale_sles_default(self):
- cfg = {}
- cc = self._get_cloud('sles')
- cc_locale.handle('cc_locale', cfg, cc, LOG, [])
-
- if cc.distro.uses_systemd():
- locale_conf = cc.distro.systemd_locale_conf_fn
- keyname = 'LANG'
- else:
- locale_conf = cc.distro.locale_conf_fn
- keyname = 'RC_LANG'
-
- contents = util.load_file(locale_conf, decode=False)
- n_cfg = ConfigObj(BytesIO(contents))
- self.assertEqual({keyname: 'en_US.UTF-8'}, dict(n_cfg))
-
- def test_locale_update_config_if_different_than_default(self):
- """Test cc_locale writes updates conf if different than default"""
- locale_conf = os.path.join(self.new_root, "etc/default/locale")
- util.write_file(locale_conf, 'LANG="en_US.UTF-8"\n')
- cfg = {'locale': 'C.UTF-8'}
- cc = self._get_cloud('ubuntu')
- with mock.patch('cloudinit.distros.debian.subp.subp') as m_subp:
- with mock.patch('cloudinit.distros.debian.LOCALE_CONF_FN',
- locale_conf):
- cc_locale.handle('cc_locale', cfg, cc, LOG, [])
- m_subp.assert_called_with(['update-locale',
- '--locale-file=%s' % locale_conf,
- 'LANG=C.UTF-8'], capture=False)
-
- def test_locale_rhel_defaults_en_us_utf8(self):
- """Test cc_locale gets en_US.UTF-8 from distro get_locale fallback"""
- cfg = {}
- cc = self._get_cloud('rhel')
- update_sysconfig = 'cloudinit.distros.rhel_util.update_sysconfig_file'
- with mock.patch.object(cc.distro, 'uses_systemd') as m_use_sd:
- m_use_sd.return_value = True
- with mock.patch(update_sysconfig) as m_update_syscfg:
- cc_locale.handle('cc_locale', cfg, cc, LOG, [])
- m_update_syscfg.assert_called_with('/etc/locale.conf',
- {'LANG': 'en_US.UTF-8'})
-
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_lxd.py b/tests/unittests/test_handler/test_handler_lxd.py
deleted file mode 100644
index b2181992..00000000
--- a/tests/unittests/test_handler/test_handler_lxd.py
+++ /dev/null
@@ -1,231 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.config import cc_lxd
-from cloudinit.sources import DataSourceNoCloud
-from cloudinit import (distros, helpers, cloud)
-from cloudinit.tests import helpers as t_help
-
-from unittest import mock
-
-
-class TestLxd(t_help.CiTestCase):
-
- with_logs = True
-
- lxd_cfg = {
- 'lxd': {
- 'init': {
- 'network_address': '0.0.0.0',
- 'storage_backend': 'zfs',
- 'storage_pool': 'poolname',
- }
- }
- }
-
- def _get_cloud(self, distro):
- cls = distros.fetch(distro)
- paths = helpers.Paths({})
- d = cls(distro, {}, paths)
- ds = DataSourceNoCloud.DataSourceNoCloud({}, d, paths)
- cc = cloud.Cloud(ds, paths, {}, d, None)
- return cc
-
- @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
- @mock.patch("cloudinit.config.cc_lxd.subp")
- def test_lxd_init(self, mock_subp, m_maybe_clean):
- cc = self._get_cloud('ubuntu')
- mock_subp.which.return_value = True
- m_maybe_clean.return_value = None
- cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, [])
- self.assertTrue(mock_subp.which.called)
- # no bridge config, so maybe_cleanup should not be called.
- self.assertFalse(m_maybe_clean.called)
- self.assertEqual(
- [mock.call(['lxd', 'waitready', '--timeout=300']),
- mock.call(
- ['lxd', 'init', '--auto', '--network-address=0.0.0.0',
- '--storage-backend=zfs', '--storage-pool=poolname'])],
- mock_subp.subp.call_args_list)
-
- @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
- @mock.patch("cloudinit.config.cc_lxd.subp")
- def test_lxd_install(self, mock_subp, m_maybe_clean):
- cc = self._get_cloud('ubuntu')
- cc.distro = mock.MagicMock()
- mock_subp.which.return_value = None
- cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, [])
- self.assertNotIn('WARN', self.logs.getvalue())
- self.assertTrue(cc.distro.install_packages.called)
- cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, [])
- self.assertFalse(m_maybe_clean.called)
- install_pkg = cc.distro.install_packages.call_args_list[0][0][0]
- self.assertEqual(sorted(install_pkg), ['lxd', 'zfsutils-linux'])
-
- @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
- @mock.patch("cloudinit.config.cc_lxd.subp")
- def test_no_init_does_nothing(self, mock_subp, m_maybe_clean):
- cc = self._get_cloud('ubuntu')
- cc.distro = mock.MagicMock()
- cc_lxd.handle('cc_lxd', {'lxd': {}}, cc, self.logger, [])
- self.assertFalse(cc.distro.install_packages.called)
- self.assertFalse(mock_subp.subp.called)
- self.assertFalse(m_maybe_clean.called)
-
- @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
- @mock.patch("cloudinit.config.cc_lxd.subp")
- def test_no_lxd_does_nothing(self, mock_subp, m_maybe_clean):
- cc = self._get_cloud('ubuntu')
- cc.distro = mock.MagicMock()
- cc_lxd.handle('cc_lxd', {'package_update': True}, cc, self.logger, [])
- self.assertFalse(cc.distro.install_packages.called)
- self.assertFalse(mock_subp.subp.called)
- self.assertFalse(m_maybe_clean.called)
-
- def test_lxd_debconf_new_full(self):
- data = {"mode": "new",
- "name": "testbr0",
- "ipv4_address": "10.0.8.1",
- "ipv4_netmask": "24",
- "ipv4_dhcp_first": "10.0.8.2",
- "ipv4_dhcp_last": "10.0.8.254",
- "ipv4_dhcp_leases": "250",
- "ipv4_nat": "true",
- "ipv6_address": "fd98:9e0:3744::1",
- "ipv6_netmask": "64",
- "ipv6_nat": "true",
- "domain": "lxd"}
- self.assertEqual(
- cc_lxd.bridge_to_debconf(data),
- {"lxd/setup-bridge": "true",
- "lxd/bridge-name": "testbr0",
- "lxd/bridge-ipv4": "true",
- "lxd/bridge-ipv4-address": "10.0.8.1",
- "lxd/bridge-ipv4-netmask": "24",
- "lxd/bridge-ipv4-dhcp-first": "10.0.8.2",
- "lxd/bridge-ipv4-dhcp-last": "10.0.8.254",
- "lxd/bridge-ipv4-dhcp-leases": "250",
- "lxd/bridge-ipv4-nat": "true",
- "lxd/bridge-ipv6": "true",
- "lxd/bridge-ipv6-address": "fd98:9e0:3744::1",
- "lxd/bridge-ipv6-netmask": "64",
- "lxd/bridge-ipv6-nat": "true",
- "lxd/bridge-domain": "lxd"})
-
- def test_lxd_debconf_new_partial(self):
- data = {"mode": "new",
- "ipv6_address": "fd98:9e0:3744::1",
- "ipv6_netmask": "64",
- "ipv6_nat": "true"}
- self.assertEqual(
- cc_lxd.bridge_to_debconf(data),
- {"lxd/setup-bridge": "true",
- "lxd/bridge-ipv6": "true",
- "lxd/bridge-ipv6-address": "fd98:9e0:3744::1",
- "lxd/bridge-ipv6-netmask": "64",
- "lxd/bridge-ipv6-nat": "true"})
-
- def test_lxd_debconf_existing(self):
- data = {"mode": "existing",
- "name": "testbr0"}
- self.assertEqual(
- cc_lxd.bridge_to_debconf(data),
- {"lxd/setup-bridge": "false",
- "lxd/use-existing-bridge": "true",
- "lxd/bridge-name": "testbr0"})
-
- def test_lxd_debconf_none(self):
- data = {"mode": "none"}
- self.assertEqual(
- cc_lxd.bridge_to_debconf(data),
- {"lxd/setup-bridge": "false",
- "lxd/bridge-name": ""})
-
- def test_lxd_cmd_new_full(self):
- data = {"mode": "new",
- "name": "testbr0",
- "ipv4_address": "10.0.8.1",
- "ipv4_netmask": "24",
- "ipv4_dhcp_first": "10.0.8.2",
- "ipv4_dhcp_last": "10.0.8.254",
- "ipv4_dhcp_leases": "250",
- "ipv4_nat": "true",
- "ipv6_address": "fd98:9e0:3744::1",
- "ipv6_netmask": "64",
- "ipv6_nat": "true",
- "domain": "lxd"}
- self.assertEqual(
- cc_lxd.bridge_to_cmd(data),
- (["network", "create", "testbr0",
- "ipv4.address=10.0.8.1/24", "ipv4.nat=true",
- "ipv4.dhcp.ranges=10.0.8.2-10.0.8.254",
- "ipv6.address=fd98:9e0:3744::1/64",
- "ipv6.nat=true", "dns.domain=lxd"],
- ["network", "attach-profile",
- "testbr0", "default", "eth0"]))
-
- def test_lxd_cmd_new_partial(self):
- data = {"mode": "new",
- "ipv6_address": "fd98:9e0:3744::1",
- "ipv6_netmask": "64",
- "ipv6_nat": "true"}
- self.assertEqual(
- cc_lxd.bridge_to_cmd(data),
- (["network", "create", "lxdbr0", "ipv4.address=none",
- "ipv6.address=fd98:9e0:3744::1/64", "ipv6.nat=true"],
- ["network", "attach-profile",
- "lxdbr0", "default", "eth0"]))
-
- def test_lxd_cmd_existing(self):
- data = {"mode": "existing",
- "name": "testbr0"}
- self.assertEqual(
- cc_lxd.bridge_to_cmd(data),
- (None, ["network", "attach-profile",
- "testbr0", "default", "eth0"]))
-
- def test_lxd_cmd_none(self):
- data = {"mode": "none"}
- self.assertEqual(
- cc_lxd.bridge_to_cmd(data),
- (None, None))
-
-
-class TestLxdMaybeCleanupDefault(t_help.CiTestCase):
- """Test the implementation of maybe_cleanup_default."""
-
- defnet = cc_lxd._DEFAULT_NETWORK_NAME
-
- @mock.patch("cloudinit.config.cc_lxd._lxc")
- def test_network_other_than_default_not_deleted(self, m_lxc):
- """deletion or removal should only occur if bridge is default."""
- cc_lxd.maybe_cleanup_default(
- net_name="lxdbr1", did_init=True, create=True, attach=True)
- m_lxc.assert_not_called()
-
- @mock.patch("cloudinit.config.cc_lxd._lxc")
- def test_did_init_false_does_not_delete(self, m_lxc):
- """deletion or removal should only occur if did_init is True."""
- cc_lxd.maybe_cleanup_default(
- net_name=self.defnet, did_init=False, create=True, attach=True)
- m_lxc.assert_not_called()
-
- @mock.patch("cloudinit.config.cc_lxd._lxc")
- def test_network_deleted_if_create_true(self, m_lxc):
- """deletion of network should occur if create is True."""
- cc_lxd.maybe_cleanup_default(
- net_name=self.defnet, did_init=True, create=True, attach=False)
- m_lxc.assert_called_with(["network", "delete", self.defnet])
-
- @mock.patch("cloudinit.config.cc_lxd._lxc")
- def test_device_removed_if_attach_true(self, m_lxc):
- """deletion of network should occur if create is True."""
- nic_name = "my_nic"
- profile = "my_profile"
- cc_lxd.maybe_cleanup_default(
- net_name=self.defnet, did_init=True, create=False, attach=True,
- profile=profile, nic_name=nic_name)
- m_lxc.assert_called_once_with(
- ["profile", "device", "remove", profile, nic_name])
-
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_mounts.py b/tests/unittests/test_handler/test_handler_mounts.py
deleted file mode 100644
index e87069f6..00000000
--- a/tests/unittests/test_handler/test_handler_mounts.py
+++ /dev/null
@@ -1,397 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import os.path
-from unittest import mock
-
-from cloudinit.config import cc_mounts
-
-from cloudinit.tests import helpers as test_helpers
-
-
-class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase):
-
- def setUp(self):
- super(TestSanitizeDevname, self).setUp()
- self.new_root = self.tmp_dir()
- self.patchOS(self.new_root)
-
- def _touch(self, path):
- path = os.path.join(self.new_root, path.lstrip('/'))
- basedir = os.path.dirname(path)
- if not os.path.exists(basedir):
- os.makedirs(basedir)
- open(path, 'a').close()
-
- def _makedirs(self, directory):
- directory = os.path.join(self.new_root, directory.lstrip('/'))
- if not os.path.exists(directory):
- os.makedirs(directory)
-
- def mock_existence_of_disk(self, disk_path):
- self._touch(disk_path)
- self._makedirs(os.path.join('/sys/block', disk_path.split('/')[-1]))
-
- def mock_existence_of_partition(self, disk_path, partition_number):
- self.mock_existence_of_disk(disk_path)
- self._touch(disk_path + str(partition_number))
- disk_name = disk_path.split('/')[-1]
- self._makedirs(os.path.join('/sys/block',
- disk_name,
- disk_name + str(partition_number)))
-
- def test_existent_full_disk_path_is_returned(self):
- disk_path = '/dev/sda'
- self.mock_existence_of_disk(disk_path)
- self.assertEqual(disk_path,
- cc_mounts.sanitize_devname(disk_path,
- lambda x: None,
- mock.Mock()))
-
- def test_existent_disk_name_returns_full_path(self):
- disk_name = 'sda'
- disk_path = '/dev/' + disk_name
- self.mock_existence_of_disk(disk_path)
- self.assertEqual(disk_path,
- cc_mounts.sanitize_devname(disk_name,
- lambda x: None,
- mock.Mock()))
-
- def test_existent_meta_disk_is_returned(self):
- actual_disk_path = '/dev/sda'
- self.mock_existence_of_disk(actual_disk_path)
- self.assertEqual(
- actual_disk_path,
- cc_mounts.sanitize_devname('ephemeral0',
- lambda x: actual_disk_path,
- mock.Mock()))
-
- def test_existent_meta_partition_is_returned(self):
- disk_name, partition_part = '/dev/sda', '1'
- actual_partition_path = disk_name + partition_part
- self.mock_existence_of_partition(disk_name, partition_part)
- self.assertEqual(
- actual_partition_path,
- cc_mounts.sanitize_devname('ephemeral0.1',
- lambda x: disk_name,
- mock.Mock()))
-
- def test_existent_meta_partition_with_p_is_returned(self):
- disk_name, partition_part = '/dev/sda', 'p1'
- actual_partition_path = disk_name + partition_part
- self.mock_existence_of_partition(disk_name, partition_part)
- self.assertEqual(
- actual_partition_path,
- cc_mounts.sanitize_devname('ephemeral0.1',
- lambda x: disk_name,
- mock.Mock()))
-
- def test_first_partition_returned_if_existent_disk_is_partitioned(self):
- disk_name, partition_part = '/dev/sda', '1'
- actual_partition_path = disk_name + partition_part
- self.mock_existence_of_partition(disk_name, partition_part)
- self.assertEqual(
- actual_partition_path,
- cc_mounts.sanitize_devname('ephemeral0',
- lambda x: disk_name,
- mock.Mock()))
-
- def test_nth_partition_returned_if_requested(self):
- disk_name, partition_part = '/dev/sda', '3'
- actual_partition_path = disk_name + partition_part
- self.mock_existence_of_partition(disk_name, partition_part)
- self.assertEqual(
- actual_partition_path,
- cc_mounts.sanitize_devname('ephemeral0.3',
- lambda x: disk_name,
- mock.Mock()))
-
- def test_transformer_returning_none_returns_none(self):
- self.assertIsNone(
- cc_mounts.sanitize_devname(
- 'ephemeral0', lambda x: None, mock.Mock()))
-
- def test_missing_device_returns_none(self):
- self.assertIsNone(
- cc_mounts.sanitize_devname('/dev/sda', None, mock.Mock()))
-
- def test_missing_sys_returns_none(self):
- disk_path = '/dev/sda'
- self._makedirs(disk_path)
- self.assertIsNone(
- cc_mounts.sanitize_devname(disk_path, None, mock.Mock()))
-
- def test_existent_disk_but_missing_partition_returns_none(self):
- disk_path = '/dev/sda'
- self.mock_existence_of_disk(disk_path)
- self.assertIsNone(
- cc_mounts.sanitize_devname(
- 'ephemeral0.1', lambda x: disk_path, mock.Mock()))
-
- def test_network_device_returns_network_device(self):
- disk_path = 'netdevice:/path'
- self.assertEqual(
- disk_path,
- cc_mounts.sanitize_devname(disk_path, None, mock.Mock()))
-
-
-class TestSwapFileCreation(test_helpers.FilesystemMockingTestCase):
-
- def setUp(self):
- super(TestSwapFileCreation, self).setUp()
- self.new_root = self.tmp_dir()
- self.patchOS(self.new_root)
-
- self.fstab_path = os.path.join(self.new_root, 'etc/fstab')
- self.swap_path = os.path.join(self.new_root, 'swap.img')
- self._makedirs('/etc')
-
- self.add_patch('cloudinit.config.cc_mounts.FSTAB_PATH',
- 'mock_fstab_path',
- self.fstab_path,
- autospec=False)
-
- self.add_patch('cloudinit.config.cc_mounts.subp.subp',
- 'm_subp_subp')
-
- self.add_patch('cloudinit.config.cc_mounts.util.mounts',
- 'mock_util_mounts',
- return_value={
- '/dev/sda1': {'fstype': 'ext4',
- 'mountpoint': '/',
- 'opts': 'rw,relatime,discard'
- }})
-
- self.mock_cloud = mock.Mock()
- self.mock_log = mock.Mock()
- self.mock_cloud.device_name_to_device = self.device_name_to_device
-
- self.cc = {
- 'swap': {
- 'filename': self.swap_path,
- 'size': '512',
- 'maxsize': '512'}}
-
- def _makedirs(self, directory):
- directory = os.path.join(self.new_root, directory.lstrip('/'))
- if not os.path.exists(directory):
- os.makedirs(directory)
-
- def device_name_to_device(self, path):
- if path == 'swap':
- return self.swap_path
- else:
- dev = None
-
- return dev
-
- @mock.patch('cloudinit.util.get_mount_info')
- @mock.patch('cloudinit.util.kernel_version')
- def test_swap_creation_method_fallocate_on_xfs(self, m_kernel_version,
- m_get_mount_info):
- m_kernel_version.return_value = (4, 20)
- m_get_mount_info.return_value = ["", "xfs"]
-
- cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, [])
- self.m_subp_subp.assert_has_calls([
- mock.call(['fallocate', '-l', '0M', self.swap_path], capture=True),
- mock.call(['mkswap', self.swap_path]),
- mock.call(['swapon', '-a'])])
-
- @mock.patch('cloudinit.util.get_mount_info')
- @mock.patch('cloudinit.util.kernel_version')
- def test_swap_creation_method_xfs(self, m_kernel_version,
- m_get_mount_info):
- m_kernel_version.return_value = (3, 18)
- m_get_mount_info.return_value = ["", "xfs"]
-
- cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, [])
- self.m_subp_subp.assert_has_calls([
- mock.call(['dd', 'if=/dev/zero',
- 'of=' + self.swap_path,
- 'bs=1M', 'count=0'], capture=True),
- mock.call(['mkswap', self.swap_path]),
- mock.call(['swapon', '-a'])])
-
- @mock.patch('cloudinit.util.get_mount_info')
- @mock.patch('cloudinit.util.kernel_version')
- def test_swap_creation_method_btrfs(self, m_kernel_version,
- m_get_mount_info):
- m_kernel_version.return_value = (4, 20)
- m_get_mount_info.return_value = ["", "btrfs"]
-
- cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, [])
- self.m_subp_subp.assert_has_calls([
- mock.call(['dd', 'if=/dev/zero',
- 'of=' + self.swap_path,
- 'bs=1M', 'count=0'], capture=True),
- mock.call(['mkswap', self.swap_path]),
- mock.call(['swapon', '-a'])])
-
- @mock.patch('cloudinit.util.get_mount_info')
- @mock.patch('cloudinit.util.kernel_version')
- def test_swap_creation_method_ext4(self, m_kernel_version,
- m_get_mount_info):
- m_kernel_version.return_value = (5, 14)
- m_get_mount_info.return_value = ["", "ext4"]
-
- cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, [])
- self.m_subp_subp.assert_has_calls([
- mock.call(['fallocate', '-l', '0M', self.swap_path], capture=True),
- mock.call(['mkswap', self.swap_path]),
- mock.call(['swapon', '-a'])])
-
-
-class TestFstabHandling(test_helpers.FilesystemMockingTestCase):
-
- swap_path = '/dev/sdb1'
-
- def setUp(self):
- super(TestFstabHandling, self).setUp()
- self.new_root = self.tmp_dir()
- self.patchOS(self.new_root)
-
- self.fstab_path = os.path.join(self.new_root, 'etc/fstab')
- self._makedirs('/etc')
-
- self.add_patch('cloudinit.config.cc_mounts.FSTAB_PATH',
- 'mock_fstab_path',
- self.fstab_path,
- autospec=False)
-
- self.add_patch('cloudinit.config.cc_mounts._is_block_device',
- 'mock_is_block_device',
- return_value=True)
-
- self.add_patch('cloudinit.config.cc_mounts.subp.subp',
- 'm_subp_subp')
-
- self.add_patch('cloudinit.config.cc_mounts.util.mounts',
- 'mock_util_mounts',
- return_value={
- '/dev/sda1': {'fstype': 'ext4',
- 'mountpoint': '/',
- 'opts': 'rw,relatime,discard'
- }})
-
- self.mock_cloud = mock.Mock()
- self.mock_log = mock.Mock()
- self.mock_cloud.device_name_to_device = self.device_name_to_device
-
- def _makedirs(self, directory):
- directory = os.path.join(self.new_root, directory.lstrip('/'))
- if not os.path.exists(directory):
- os.makedirs(directory)
-
- def device_name_to_device(self, path):
- if path == 'swap':
- return self.swap_path
- else:
- dev = None
-
- return dev
-
- def test_no_fstab(self):
- """ Handle images which do not include an fstab. """
- self.assertFalse(os.path.exists(cc_mounts.FSTAB_PATH))
- fstab_expected_content = (
- '%s\tnone\tswap\tsw,comment=cloudconfig\t'
- '0\t0\n' % (self.swap_path,)
- )
- cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, [])
- with open(cc_mounts.FSTAB_PATH, 'r') as fd:
- fstab_new_content = fd.read()
- self.assertEqual(fstab_expected_content, fstab_new_content)
-
- def test_swap_integrity(self):
- '''Ensure that the swap file is correctly created and can
- swapon successfully. Fixing the corner case of:
- kernel: swapon: swapfile has holes'''
-
- fstab = '/swap.img swap swap defaults 0 0\n'
-
- with open(cc_mounts.FSTAB_PATH, 'w') as fd:
- fd.write(fstab)
- cc = {'swap': ['filename: /swap.img', 'size: 512', 'maxsize: 512']}
- cc_mounts.handle(None, cc, self.mock_cloud, self.mock_log, [])
-
- def test_fstab_no_swap_device(self):
- '''Ensure that cloud-init adds a discovered swap partition
- to /etc/fstab.'''
-
- fstab_original_content = ''
- fstab_expected_content = (
- '%s\tnone\tswap\tsw,comment=cloudconfig\t'
- '0\t0\n' % (self.swap_path,)
- )
-
- with open(cc_mounts.FSTAB_PATH, 'w') as fd:
- fd.write(fstab_original_content)
-
- cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, [])
-
- with open(cc_mounts.FSTAB_PATH, 'r') as fd:
- fstab_new_content = fd.read()
- self.assertEqual(fstab_expected_content, fstab_new_content)
-
- def test_fstab_same_swap_device_already_configured(self):
- '''Ensure that cloud-init will not add a swap device if the same
- device already exists in /etc/fstab.'''
-
- fstab_original_content = '%s swap swap defaults 0 0\n' % (
- self.swap_path,)
- fstab_expected_content = fstab_original_content
-
- with open(cc_mounts.FSTAB_PATH, 'w') as fd:
- fd.write(fstab_original_content)
-
- cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, [])
-
- with open(cc_mounts.FSTAB_PATH, 'r') as fd:
- fstab_new_content = fd.read()
- self.assertEqual(fstab_expected_content, fstab_new_content)
-
- def test_fstab_alternate_swap_device_already_configured(self):
- '''Ensure that cloud-init will add a discovered swap device to
- /etc/fstab even when there exists a swap definition on another
- device.'''
-
- fstab_original_content = '/dev/sdc1 swap swap defaults 0 0\n'
- fstab_expected_content = (
- fstab_original_content +
- '%s\tnone\tswap\tsw,comment=cloudconfig\t'
- '0\t0\n' % (self.swap_path,)
- )
-
- with open(cc_mounts.FSTAB_PATH, 'w') as fd:
- fd.write(fstab_original_content)
-
- cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, [])
-
- with open(cc_mounts.FSTAB_PATH, 'r') as fd:
- fstab_new_content = fd.read()
- self.assertEqual(fstab_expected_content, fstab_new_content)
-
- def test_no_change_fstab_sets_needs_mount_all(self):
-        '''Verify mount -a is still called when fstab entries are unchanged.'''
- fstab_original_content = (
- 'LABEL=cloudimg-rootfs / ext4 defaults 0 0\n'
- 'LABEL=UEFI /boot/efi vfat defaults 0 0\n'
- '/dev/vdb /mnt auto defaults,noexec,comment=cloudconfig 0 2\n'
- )
- fstab_expected_content = fstab_original_content
- cc = {
- 'mounts': [
- ['/dev/vdb', '/mnt', 'auto', 'defaults,noexec']
- ]
- }
- with open(cc_mounts.FSTAB_PATH, 'w') as fd:
- fd.write(fstab_original_content)
- with open(cc_mounts.FSTAB_PATH, 'r') as fd:
- fstab_new_content = fd.read()
- self.assertEqual(fstab_expected_content, fstab_new_content)
- cc_mounts.handle(None, cc, self.mock_cloud, self.mock_log, [])
- self.m_subp_subp.assert_has_calls([
- mock.call(['mount', '-a']),
- mock.call(['systemctl', 'daemon-reload'])])
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_puppet.py b/tests/unittests/test_handler/test_handler_puppet.py
deleted file mode 100644
index 62388ac6..00000000
--- a/tests/unittests/test_handler/test_handler_puppet.py
+++ /dev/null
@@ -1,179 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.config import cc_puppet
-from cloudinit.sources import DataSourceNone
-from cloudinit import (distros, helpers, cloud, util)
-from cloudinit.tests.helpers import CiTestCase, mock
-
-import logging
-import textwrap
-
-
-LOG = logging.getLogger(__name__)
-
-
-@mock.patch('cloudinit.config.cc_puppet.subp.subp')
-@mock.patch('cloudinit.config.cc_puppet.os')
-class TestAutostartPuppet(CiTestCase):
-
- def test_wb_autostart_puppet_updates_puppet_default(self, m_os, m_subp):
- """Update /etc/default/puppet to autostart if it exists."""
-
- def _fake_exists(path):
- return path == '/etc/default/puppet'
-
- m_os.path.exists.side_effect = _fake_exists
- cc_puppet._autostart_puppet(LOG)
- self.assertEqual(
- [mock.call(['sed', '-i', '-e', 's/^START=.*/START=yes/',
- '/etc/default/puppet'], capture=False)],
- m_subp.call_args_list)
-
-    def test_wb_autostart_puppet_enables_puppet_systemctl(self, m_os, m_subp):
- """If systemctl is present, enable puppet via systemctl."""
-
- def _fake_exists(path):
- return path == '/bin/systemctl'
-
- m_os.path.exists.side_effect = _fake_exists
- cc_puppet._autostart_puppet(LOG)
- expected_calls = [mock.call(
- ['/bin/systemctl', 'enable', 'puppet.service'], capture=False)]
- self.assertEqual(expected_calls, m_subp.call_args_list)
-
-    def test_wb_autostart_puppet_enables_puppet_chkconfig(self, m_os, m_subp):
- """If chkconfig is present, enable puppet via checkcfg."""
-
- def _fake_exists(path):
- return path == '/sbin/chkconfig'
-
- m_os.path.exists.side_effect = _fake_exists
- cc_puppet._autostart_puppet(LOG)
- expected_calls = [mock.call(
- ['/sbin/chkconfig', 'puppet', 'on'], capture=False)]
- self.assertEqual(expected_calls, m_subp.call_args_list)
-
-
-@mock.patch('cloudinit.config.cc_puppet._autostart_puppet')
-class TestPuppetHandle(CiTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestPuppetHandle, self).setUp()
- self.new_root = self.tmp_dir()
- self.conf = self.tmp_path('puppet.conf')
- self.csr_attributes_path = self.tmp_path('csr_attributes.yaml')
-
- def _get_cloud(self, distro):
- paths = helpers.Paths({'templates_dir': self.new_root})
- cls = distros.fetch(distro)
- mydist = cls(distro, {}, paths)
- myds = DataSourceNone.DataSourceNone({}, mydist, paths)
- return cloud.Cloud(myds, paths, {}, mydist, None)
-
- def test_handler_skips_missing_puppet_key_in_cloudconfig(self, m_auto):
- """Cloud-config containing no 'puppet' key is skipped."""
- mycloud = self._get_cloud('ubuntu')
- cfg = {}
- cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
- self.assertIn(
- "no 'puppet' configuration found", self.logs.getvalue())
- self.assertEqual(0, m_auto.call_count)
-
- @mock.patch('cloudinit.config.cc_puppet.subp.subp')
- def test_handler_puppet_config_starts_puppet_service(self, m_subp, m_auto):
- """Cloud-config 'puppet' configuration starts puppet."""
- mycloud = self._get_cloud('ubuntu')
- cfg = {'puppet': {'install': False}}
- cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
- self.assertEqual(1, m_auto.call_count)
- self.assertEqual(
- [mock.call(['service', 'puppet', 'start'], capture=False)],
- m_subp.call_args_list)
-
- @mock.patch('cloudinit.config.cc_puppet.subp.subp')
- def test_handler_empty_puppet_config_installs_puppet(self, m_subp, m_auto):
- """Cloud-config empty 'puppet' configuration installs latest puppet."""
- mycloud = self._get_cloud('ubuntu')
- mycloud.distro = mock.MagicMock()
- cfg = {'puppet': {}}
- cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
- self.assertEqual(
- [mock.call(('puppet', None))],
- mycloud.distro.install_packages.call_args_list)
-
- @mock.patch('cloudinit.config.cc_puppet.subp.subp')
- def test_handler_puppet_config_installs_puppet_on_true(self, m_subp, _):
- """Cloud-config with 'puppet' key installs when 'install' is True."""
- mycloud = self._get_cloud('ubuntu')
- mycloud.distro = mock.MagicMock()
- cfg = {'puppet': {'install': True}}
- cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
- self.assertEqual(
- [mock.call(('puppet', None))],
- mycloud.distro.install_packages.call_args_list)
-
- @mock.patch('cloudinit.config.cc_puppet.subp.subp')
- def test_handler_puppet_config_installs_puppet_version(self, m_subp, _):
- """Cloud-config 'puppet' configuration can specify a version."""
- mycloud = self._get_cloud('ubuntu')
- mycloud.distro = mock.MagicMock()
- cfg = {'puppet': {'version': '3.8'}}
- cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
- self.assertEqual(
- [mock.call(('puppet', '3.8'))],
- mycloud.distro.install_packages.call_args_list)
-
- @mock.patch('cloudinit.config.cc_puppet.subp.subp')
- def test_handler_puppet_config_updates_puppet_conf(self, m_subp, m_auto):
- """When 'conf' is provided update values in PUPPET_CONF_PATH."""
- mycloud = self._get_cloud('ubuntu')
- cfg = {
- 'puppet': {
- 'conf': {'agent': {'server': 'puppetmaster.example.org'}}}}
- util.write_file(self.conf, '[agent]\nserver = origpuppet\nother = 3')
- puppet_conf_path = 'cloudinit.config.cc_puppet.PUPPET_CONF_PATH'
- mycloud.distro = mock.MagicMock()
- with mock.patch(puppet_conf_path, self.conf):
- cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
- content = util.load_file(self.conf)
- expected = '[agent]\nserver = puppetmaster.example.org\nother = 3\n\n'
- self.assertEqual(expected, content)
-
- @mock.patch('cloudinit.config.cc_puppet.subp.subp')
- def test_handler_puppet_writes_csr_attributes_file(self, m_subp, m_auto):
- """When csr_attributes is provided
- creates file in PUPPET_CSR_ATTRIBUTES_PATH."""
- mycloud = self._get_cloud('ubuntu')
- mycloud.distro = mock.MagicMock()
- cfg = {
- 'puppet': {
- 'csr_attributes': {
- 'custom_attributes': {
- '1.2.840.113549.1.9.7':
- '342thbjkt82094y0uthhor289jnqthpc2290'
- },
- 'extension_requests': {
- 'pp_uuid': 'ED803750-E3C7-44F5-BB08-41A04433FE2E',
- 'pp_image_name': 'my_ami_image',
- 'pp_preshared_key':
- '342thbjkt82094y0uthhor289jnqthpc2290'
- }
- }
- }
- }
- csr_attributes = 'cloudinit.config.cc_puppet.' \
- 'PUPPET_CSR_ATTRIBUTES_PATH'
- with mock.patch(csr_attributes, self.csr_attributes_path):
- cc_puppet.handle('notimportant', cfg, mycloud, LOG, None)
- content = util.load_file(self.csr_attributes_path)
- expected = textwrap.dedent("""\
- custom_attributes:
- 1.2.840.113549.1.9.7: 342thbjkt82094y0uthhor289jnqthpc2290
- extension_requests:
- pp_image_name: my_ami_image
- pp_preshared_key: 342thbjkt82094y0uthhor289jnqthpc2290
- pp_uuid: ED803750-E3C7-44F5-BB08-41A04433FE2E
- """)
- self.assertEqual(expected, content)
diff --git a/tests/unittests/test_handler/test_handler_refresh_rmc_and_interface.py b/tests/unittests/test_handler/test_handler_refresh_rmc_and_interface.py
deleted file mode 100644
index e13b7793..00000000
--- a/tests/unittests/test_handler/test_handler_refresh_rmc_and_interface.py
+++ /dev/null
@@ -1,109 +0,0 @@
-from cloudinit.config import cc_refresh_rmc_and_interface as ccrmci
-
-from cloudinit import util
-
-from cloudinit.tests import helpers as t_help
-from cloudinit.tests.helpers import mock
-
-from textwrap import dedent
-import logging
-
-LOG = logging.getLogger(__name__)
-MPATH = "cloudinit.config.cc_refresh_rmc_and_interface"
-NET_INFO = {
- 'lo': {'ipv4': [{'ip': '127.0.0.1',
- 'bcast': '', 'mask': '255.0.0.0',
- 'scope': 'host'}],
- 'ipv6': [{'ip': '::1/128',
- 'scope6': 'host'}], 'hwaddr': '',
- 'up': 'True'},
- 'env2': {'ipv4': [{'ip': '8.0.0.19',
- 'bcast': '8.0.0.255', 'mask': '255.255.255.0',
- 'scope': 'global'}],
- 'ipv6': [{'ip': 'fe80::f896:c2ff:fe81:8220/64',
- 'scope6': 'link'}], 'hwaddr': 'fa:96:c2:81:82:20',
- 'up': 'True'},
- 'env3': {'ipv4': [{'ip': '90.0.0.14',
- 'bcast': '90.0.0.255', 'mask': '255.255.255.0',
- 'scope': 'global'}],
- 'ipv6': [{'ip': 'fe80::f896:c2ff:fe81:8221/64',
- 'scope6': 'link'}], 'hwaddr': 'fa:96:c2:81:82:21',
- 'up': 'True'},
- 'env4': {'ipv4': [{'ip': '9.114.23.7',
- 'bcast': '9.114.23.255', 'mask': '255.255.255.0',
- 'scope': 'global'}],
- 'ipv6': [{'ip': 'fe80::f896:c2ff:fe81:8222/64',
- 'scope6': 'link'}], 'hwaddr': 'fa:96:c2:81:82:22',
- 'up': 'True'},
- 'env5': {'ipv4': [],
- 'ipv6': [{'ip': 'fe80::9c26:c3ff:fea4:62c8/64',
- 'scope6': 'link'}], 'hwaddr': '42:20:86:df:fa:4c',
- 'up': 'True'}}
-
-
-class TestRsctNodeFile(t_help.CiTestCase):
- def test_disable_ipv6_interface(self):
- """test parsing of iface files."""
- fname = self.tmp_path("iface-eth5")
- util.write_file(fname, dedent("""\
- BOOTPROTO=static
- DEVICE=eth5
- HWADDR=42:20:86:df:fa:4c
- IPV6INIT=yes
- IPADDR6=fe80::9c26:c3ff:fea4:62c8/64
- IPV6ADDR=fe80::9c26:c3ff:fea4:62c8/64
- NM_CONTROLLED=yes
- ONBOOT=yes
- STARTMODE=auto
- TYPE=Ethernet
- USERCTL=no
- """))
-
- ccrmci.disable_ipv6(fname)
- self.assertEqual(dedent("""\
- BOOTPROTO=static
- DEVICE=eth5
- HWADDR=42:20:86:df:fa:4c
- ONBOOT=yes
- STARTMODE=auto
- TYPE=Ethernet
- USERCTL=no
- NM_CONTROLLED=no
- """), util.load_file(fname))
-
- @mock.patch(MPATH + '.refresh_rmc')
- @mock.patch(MPATH + '.restart_network_manager')
- @mock.patch(MPATH + '.disable_ipv6')
- @mock.patch(MPATH + '.refresh_ipv6')
- @mock.patch(MPATH + '.netinfo.netdev_info')
- @mock.patch(MPATH + '.subp.which')
-    def test_handle(self, m_which,
-                    m_netdev_info, m_refresh_ipv6, m_disable_ipv6,
-                    m_restart_nm, m_refresh_rmc):
- """Basic test of handle."""
- m_netdev_info.return_value = NET_INFO
- m_which.return_value = '/opt/rsct/bin/rmcctrl'
- ccrmci.handle(
- "refresh_rmc_and_interface", None, None, None, None)
- self.assertEqual(1, m_netdev_info.call_count)
- m_refresh_ipv6.assert_called_with('env5')
- m_disable_ipv6.assert_called_with(
- '/etc/sysconfig/network-scripts/ifcfg-env5')
- self.assertEqual(1, m_restart_nm.call_count)
- self.assertEqual(1, m_refresh_rmc.call_count)
-
- @mock.patch(MPATH + '.netinfo.netdev_info')
- def test_find_ipv6(self, m_netdev_info):
- """find_ipv6_ifaces parses netdev_info returning those with ipv6"""
- m_netdev_info.return_value = NET_INFO
- found = ccrmci.find_ipv6_ifaces()
- self.assertEqual(['env5'], found)
-
- @mock.patch(MPATH + '.subp.subp')
- def test_refresh_ipv6(self, m_subp):
- """refresh_ipv6 should ip down and up the interface."""
- iface = "myeth0"
- ccrmci.refresh_ipv6(iface)
- m_subp.assert_has_calls([
- mock.call(['ip', 'link', 'set', iface, 'down']),
- mock.call(['ip', 'link', 'set', iface, 'up'])])
diff --git a/tests/unittests/test_handler/test_handler_resizefs.py b/tests/unittests/test_handler/test_handler_resizefs.py
deleted file mode 100644
index 28d55072..00000000
--- a/tests/unittests/test_handler/test_handler_resizefs.py
+++ /dev/null
@@ -1,398 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.config.cc_resizefs import (
- can_skip_resize, handle, maybe_get_writable_device_path, _resize_btrfs,
- _resize_zfs, _resize_xfs, _resize_ext, _resize_ufs)
-
-from collections import namedtuple
-import logging
-
-from cloudinit.subp import ProcessExecutionError
-from cloudinit.tests.helpers import (
- CiTestCase, mock, skipUnlessJsonSchema, util, wrap_and_call)
-
-
-LOG = logging.getLogger(__name__)
-
-
-class TestResizefs(CiTestCase):
- with_logs = True
-
- def setUp(self):
- super(TestResizefs, self).setUp()
- self.name = "resizefs"
-
- @mock.patch('cloudinit.subp.subp')
- def test_skip_ufs_resize(self, m_subp):
- fs_type = "ufs"
- resize_what = "/"
- devpth = "/dev/da0p2"
- err = ("growfs: requested size 2.0GB is not larger than the "
- "current filesystem size 2.0GB\n")
- exception = ProcessExecutionError(stderr=err, exit_code=1)
- m_subp.side_effect = exception
- res = can_skip_resize(fs_type, resize_what, devpth)
- self.assertTrue(res)
-
- @mock.patch('cloudinit.subp.subp')
- def test_cannot_skip_ufs_resize(self, m_subp):
- fs_type = "ufs"
- resize_what = "/"
- devpth = "/dev/da0p2"
- m_subp.return_value = (
- ("stdout: super-block backups (for fsck_ffs -b #) at:\n\n"),
- ("growfs: no room to allocate last cylinder group; "
- "leaving 364KB unused\n")
- )
- res = can_skip_resize(fs_type, resize_what, devpth)
- self.assertFalse(res)
-
- @mock.patch('cloudinit.subp.subp')
- def test_cannot_skip_ufs_growfs_exception(self, m_subp):
- fs_type = "ufs"
- resize_what = "/"
- devpth = "/dev/da0p2"
- err = "growfs: /dev/da0p2 is not clean - run fsck.\n"
- exception = ProcessExecutionError(stderr=err, exit_code=1)
- m_subp.side_effect = exception
- with self.assertRaises(ProcessExecutionError):
- can_skip_resize(fs_type, resize_what, devpth)
-
- def test_can_skip_resize_ext(self):
- self.assertFalse(can_skip_resize('ext', '/', '/dev/sda1'))
-
- def test_handle_noops_on_disabled(self):
- """The handle function logs when the configuration disables resize."""
- cfg = {'resize_rootfs': False}
- handle('cc_resizefs', cfg, _cloud=None, log=LOG, args=[])
- self.assertIn(
- 'DEBUG: Skipping module named cc_resizefs, resizing disabled\n',
- self.logs.getvalue())
-
- @skipUnlessJsonSchema()
- def test_handle_schema_validation_logs_invalid_resize_rootfs_value(self):
- """The handle reports json schema violations as a warning.
-
- Invalid values for resize_rootfs result in disabling the module.
- """
- cfg = {'resize_rootfs': 'junk'}
- handle('cc_resizefs', cfg, _cloud=None, log=LOG, args=[])
- logs = self.logs.getvalue()
- self.assertIn(
- "WARNING: Invalid config:\nresize_rootfs: 'junk' is not one of"
- " [True, False, 'noblock']",
- logs)
- self.assertIn(
- 'DEBUG: Skipping module named cc_resizefs, resizing disabled\n',
- logs)
-
- @mock.patch('cloudinit.config.cc_resizefs.util.get_mount_info')
- def test_handle_warns_on_unknown_mount_info(self, m_get_mount_info):
- """handle warns when get_mount_info sees unknown filesystem for /."""
- m_get_mount_info.return_value = None
- cfg = {'resize_rootfs': True}
- handle('cc_resizefs', cfg, _cloud=None, log=LOG, args=[])
- logs = self.logs.getvalue()
- self.assertNotIn("WARNING: Invalid config:\nresize_rootfs:", logs)
- self.assertIn(
- 'WARNING: Could not determine filesystem type of /\n',
- logs)
- self.assertEqual(
- [mock.call('/', LOG)],
- m_get_mount_info.call_args_list)
-
- def test_handle_warns_on_undiscoverable_root_path_in_commandline(self):
- """handle noops when the root path is not found on the commandline."""
- cfg = {'resize_rootfs': True}
- exists_mock_path = 'cloudinit.config.cc_resizefs.os.path.exists'
-
- def fake_mount_info(path, log):
- self.assertEqual('/', path)
- self.assertEqual(LOG, log)
- return ('/dev/root', 'ext4', '/')
-
- with mock.patch(exists_mock_path) as m_exists:
- m_exists.return_value = False
- wrap_and_call(
- 'cloudinit.config.cc_resizefs.util',
- {'is_container': {'return_value': False},
- 'get_mount_info': {'side_effect': fake_mount_info},
- 'get_cmdline': {'return_value': 'BOOT_IMAGE=/vmlinuz.efi'}},
- handle, 'cc_resizefs', cfg, _cloud=None, log=LOG,
- args=[])
- logs = self.logs.getvalue()
- self.assertIn("WARNING: Unable to find device '/dev/root'", logs)
-
- def test_resize_zfs_cmd_return(self):
- zpool = 'zroot'
- devpth = 'gpt/system'
- self.assertEqual(('zpool', 'online', '-e', zpool, devpth),
- _resize_zfs(zpool, devpth))
-
- def test_resize_xfs_cmd_return(self):
- mount_point = '/mnt/test'
- devpth = '/dev/sda1'
- self.assertEqual(('xfs_growfs', mount_point),
- _resize_xfs(mount_point, devpth))
-
- def test_resize_ext_cmd_return(self):
- mount_point = '/'
- devpth = '/dev/sdb1'
- self.assertEqual(('resize2fs', devpth),
- _resize_ext(mount_point, devpth))
-
- def test_resize_ufs_cmd_return(self):
- mount_point = '/'
- devpth = '/dev/sda2'
- self.assertEqual(('growfs', '-y', mount_point),
- _resize_ufs(mount_point, devpth))
-
- @mock.patch('cloudinit.util.is_container', return_value=False)
- @mock.patch('cloudinit.util.parse_mount')
- @mock.patch('cloudinit.util.get_device_info_from_zpool')
- @mock.patch('cloudinit.util.get_mount_info')
- def test_handle_zfs_root(self, mount_info, zpool_info, parse_mount,
- is_container):
- devpth = 'vmzroot/ROOT/freebsd'
- disk = 'gpt/system'
- fs_type = 'zfs'
- mount_point = '/'
-
- mount_info.return_value = (devpth, fs_type, mount_point)
- zpool_info.return_value = disk
- parse_mount.return_value = (devpth, fs_type, mount_point)
-
- cfg = {'resize_rootfs': True}
-
- with mock.patch('cloudinit.config.cc_resizefs.do_resize') as dresize:
- handle('cc_resizefs', cfg, _cloud=None, log=LOG, args=[])
- ret = dresize.call_args[0][0]
-
- self.assertEqual(('zpool', 'online', '-e', 'vmzroot', disk), ret)
-
- @mock.patch('cloudinit.util.is_container', return_value=False)
- @mock.patch('cloudinit.util.get_mount_info')
- @mock.patch('cloudinit.util.get_device_info_from_zpool')
- @mock.patch('cloudinit.util.parse_mount')
- def test_handle_modern_zfsroot(self, mount_info, zpool_info, parse_mount,
- is_container):
- devpth = 'zroot/ROOT/default'
- disk = 'da0p3'
- fs_type = 'zfs'
- mount_point = '/'
-
- mount_info.return_value = (devpth, fs_type, mount_point)
- zpool_info.return_value = disk
- parse_mount.return_value = (devpth, fs_type, mount_point)
-
- cfg = {'resize_rootfs': True}
-
- def fake_stat(devpath):
- if devpath == disk:
- raise OSError("not here")
- FakeStat = namedtuple(
- 'FakeStat', ['st_mode', 'st_size', 'st_mtime']) # minimal stat
- return FakeStat(25008, 0, 1) # fake char block device
-
- with mock.patch('cloudinit.config.cc_resizefs.do_resize') as dresize:
- with mock.patch('cloudinit.config.cc_resizefs.os.stat') as m_stat:
- m_stat.side_effect = fake_stat
- handle('cc_resizefs', cfg, _cloud=None, log=LOG, args=[])
-
- self.assertEqual(('zpool', 'online', '-e', 'zroot', '/dev/' + disk),
- dresize.call_args[0][0])
-
-
-class TestRootDevFromCmdline(CiTestCase):
-
- def test_rootdev_from_cmdline_with_no_root(self):
- """Return None from rootdev_from_cmdline when root is not present."""
- invalid_cases = [
- 'BOOT_IMAGE=/adsf asdfa werasef root adf', 'BOOT_IMAGE=/adsf', '']
- for case in invalid_cases:
- self.assertIsNone(util.rootdev_from_cmdline(case))
-
- def test_rootdev_from_cmdline_with_root_startswith_dev(self):
- """Return the cmdline root when the path starts with /dev."""
- self.assertEqual(
- '/dev/this', util.rootdev_from_cmdline('asdf root=/dev/this'))
-
- def test_rootdev_from_cmdline_with_root_without_dev_prefix(self):
- """Add /dev prefix to cmdline root when the path lacks the prefix."""
- self.assertEqual(
- '/dev/this', util.rootdev_from_cmdline('asdf root=this'))
-
- def test_rootdev_from_cmdline_with_root_with_label(self):
- """When cmdline root contains a LABEL, our root is disk/by-label."""
- self.assertEqual(
- '/dev/disk/by-label/unique',
- util.rootdev_from_cmdline('asdf root=LABEL=unique'))
-
- def test_rootdev_from_cmdline_with_root_with_uuid(self):
- """When cmdline root contains a UUID, our root is disk/by-uuid."""
- self.assertEqual(
- '/dev/disk/by-uuid/adsfdsaf-adsf',
- util.rootdev_from_cmdline('asdf root=UUID=adsfdsaf-adsf'))
-
-
-class TestMaybeGetDevicePathAsWritableBlock(CiTestCase):
-
- with_logs = True
-
- def test_maybe_get_writable_device_path_none_on_overlayroot(self):
- """When devpath is overlayroot (on MAAS), is_dev_writable is False."""
- info = 'does not matter'
- devpath = wrap_and_call(
- 'cloudinit.config.cc_resizefs.util',
- {'is_container': {'return_value': False}},
- maybe_get_writable_device_path, 'overlayroot', info, LOG)
- self.assertIsNone(devpath)
- self.assertIn(
- "Not attempting to resize devpath 'overlayroot'",
- self.logs.getvalue())
-
- def test_maybe_get_writable_device_path_warns_missing_cmdline_root(self):
-        """When root is absent and not in the cmdline, log a warning."""
- info = 'does not matter'
-
- def fake_mount_info(path, log):
- self.assertEqual('/', path)
- self.assertEqual(LOG, log)
- return ('/dev/root', 'ext4', '/')
-
- exists_mock_path = 'cloudinit.config.cc_resizefs.os.path.exists'
- with mock.patch(exists_mock_path) as m_exists:
- m_exists.return_value = False
- devpath = wrap_and_call(
- 'cloudinit.config.cc_resizefs.util',
- {'is_container': {'return_value': False},
- 'get_mount_info': {'side_effect': fake_mount_info},
- 'get_cmdline': {'return_value': 'BOOT_IMAGE=/vmlinuz.efi'}},
- maybe_get_writable_device_path, '/dev/root', info, LOG)
- self.assertIsNone(devpath)
- logs = self.logs.getvalue()
- self.assertIn("WARNING: Unable to find device '/dev/root'", logs)
-
- def test_maybe_get_writable_device_path_does_not_exist(self):
- """When devpath does not exist, a warning is logged."""
- info = 'dev=/dev/I/dont/exist mnt_point=/ path=/dev/none'
- devpath = wrap_and_call(
- 'cloudinit.config.cc_resizefs.util',
- {'is_container': {'return_value': False}},
- maybe_get_writable_device_path, '/dev/I/dont/exist', info, LOG)
- self.assertIsNone(devpath)
- self.assertIn(
- "WARNING: Device '/dev/I/dont/exist' did not exist."
- ' cannot resize: %s' % info,
- self.logs.getvalue())
-
- def test_maybe_get_writable_device_path_does_not_exist_in_container(self):
- """When devpath does not exist in a container, log a debug message."""
- info = 'dev=/dev/I/dont/exist mnt_point=/ path=/dev/none'
- devpath = wrap_and_call(
- 'cloudinit.config.cc_resizefs.util',
- {'is_container': {'return_value': True}},
- maybe_get_writable_device_path, '/dev/I/dont/exist', info, LOG)
- self.assertIsNone(devpath)
- self.assertIn(
- "DEBUG: Device '/dev/I/dont/exist' did not exist in container."
- ' cannot resize: %s' % info,
- self.logs.getvalue())
-
- def test_maybe_get_writable_device_path_raises_oserror(self):
-        """When os.stat raises an unexpected OSError, it is reraised."""
- info = 'dev=/dev/I/dont/exist mnt_point=/ path=/dev/none'
- with self.assertRaises(OSError) as context_manager:
- wrap_and_call(
- 'cloudinit.config.cc_resizefs',
- {'util.is_container': {'return_value': True},
- 'os.stat': {'side_effect': OSError('Something unexpected')}},
- maybe_get_writable_device_path, '/dev/I/dont/exist', info, LOG)
- self.assertEqual(
- 'Something unexpected', str(context_manager.exception))
-
- def test_maybe_get_writable_device_path_non_block(self):
-        """When device is not a block device, warn and return None."""
- fake_devpath = self.tmp_path('dev/readwrite')
- util.write_file(fake_devpath, '', mode=0o600) # read-write
- info = 'dev=/dev/root mnt_point=/ path={0}'.format(fake_devpath)
-
- devpath = wrap_and_call(
- 'cloudinit.config.cc_resizefs.util',
- {'is_container': {'return_value': False}},
- maybe_get_writable_device_path, fake_devpath, info, LOG)
- self.assertIsNone(devpath)
- self.assertIn(
- "WARNING: device '{0}' not a block device. cannot resize".format(
- fake_devpath),
- self.logs.getvalue())
-
- def test_maybe_get_writable_device_path_non_block_on_container(self):
-        """When device is a non-block device in a container, emit debug log."""
- fake_devpath = self.tmp_path('dev/readwrite')
- util.write_file(fake_devpath, '', mode=0o600) # read-write
- info = 'dev=/dev/root mnt_point=/ path={0}'.format(fake_devpath)
-
- devpath = wrap_and_call(
- 'cloudinit.config.cc_resizefs.util',
- {'is_container': {'return_value': True}},
- maybe_get_writable_device_path, fake_devpath, info, LOG)
- self.assertIsNone(devpath)
- self.assertIn(
- "DEBUG: device '{0}' not a block device in container."
- ' cannot resize'.format(fake_devpath),
- self.logs.getvalue())
-
- def test_maybe_get_writable_device_path_returns_cmdline_root(self):
- """When root device is UUID in kernel commandline, update devpath."""
- # XXX Long-term we want to use FilesystemMocking test to avoid
- # touching os.stat.
- FakeStat = namedtuple(
- 'FakeStat', ['st_mode', 'st_size', 'st_mtime']) # minimal def.
- info = 'dev=/dev/root mnt_point=/ path=/does/not/matter'
- devpath = wrap_and_call(
- 'cloudinit.config.cc_resizefs',
- {'util.get_cmdline': {'return_value': 'asdf root=UUID=my-uuid'},
- 'util.is_container': False,
- 'os.path.exists': False, # /dev/root doesn't exist
- 'os.stat': {
- 'return_value': FakeStat(25008, 0, 1)} # char block device
- },
- maybe_get_writable_device_path, '/dev/root', info, LOG)
- self.assertEqual('/dev/disk/by-uuid/my-uuid', devpath)
- self.assertIn(
- "DEBUG: Converted /dev/root to '/dev/disk/by-uuid/my-uuid'"
- " per kernel cmdline",
- self.logs.getvalue())
-
- @mock.patch('cloudinit.util.mount_is_read_write')
- @mock.patch('cloudinit.config.cc_resizefs.os.path.isdir')
- def test_resize_btrfs_mount_is_ro(self, m_is_dir, m_is_rw):
- """Do not resize / directly if it is read-only. (LP: #1734787)."""
- m_is_rw.return_value = False
- m_is_dir.return_value = True
- self.assertEqual(
- ('btrfs', 'filesystem', 'resize', 'max', '//.snapshots'),
- _resize_btrfs("/", "/dev/sda1"))
-
- @mock.patch('cloudinit.util.mount_is_read_write')
- @mock.patch('cloudinit.config.cc_resizefs.os.path.isdir')
- def test_resize_btrfs_mount_is_rw(self, m_is_dir, m_is_rw):
-        """Resize / directly when it is read-write. (LP: #1734787)."""
- m_is_rw.return_value = True
- m_is_dir.return_value = True
- self.assertEqual(
- ('btrfs', 'filesystem', 'resize', 'max', '/'),
- _resize_btrfs("/", "/dev/sda1"))
-
- @mock.patch('cloudinit.util.is_container', return_value=True)
- @mock.patch('cloudinit.util.is_FreeBSD')
- def test_maybe_get_writable_device_path_zfs_freebsd(self, freebsd,
- m_is_container):
- freebsd.return_value = True
- info = 'dev=gpt/system mnt_point=/ path=/'
- devpth = maybe_get_writable_device_path('gpt/system', info, LOG)
- self.assertEqual('gpt/system', devpth)
-
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_resizefs_vyos.py b/tests/unittests/test_handler/test_handler_resizefs_vyos.py
deleted file mode 100644
index c18ab1ea..00000000
--- a/tests/unittests/test_handler/test_handler_resizefs_vyos.py
+++ /dev/null
@@ -1,398 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.config.cc_resizefs_vyos import (
- can_skip_resize, handle, maybe_get_writable_device_path, _resize_btrfs,
- _resize_zfs, _resize_xfs, _resize_ext, _resize_ufs)
-
-from collections import namedtuple
-import logging
-
-from cloudinit.subp import ProcessExecutionError
-from cloudinit.tests.helpers import (
- CiTestCase, mock, skipUnlessJsonSchema, util, wrap_and_call)
-
-
-LOG = logging.getLogger(__name__)
-
-
-class TestResizefs(CiTestCase):
- with_logs = True
-
- def setUp(self):
- super(TestResizefs, self).setUp()
- self.name = "resizefs"
-
- @mock.patch('cloudinit.subp.subp')
- def test_skip_ufs_resize(self, m_subp):
- fs_type = "ufs"
- resize_what = "/"
- devpth = "/dev/da0p2"
- err = ("growfs: requested size 2.0GB is not larger than the "
- "current filesystem size 2.0GB\n")
- exception = ProcessExecutionError(stderr=err, exit_code=1)
- m_subp.side_effect = exception
- res = can_skip_resize(fs_type, resize_what, devpth)
- self.assertTrue(res)
-
- @mock.patch('cloudinit.subp.subp')
- def test_cannot_skip_ufs_resize(self, m_subp):
- fs_type = "ufs"
- resize_what = "/"
- devpth = "/dev/da0p2"
- m_subp.return_value = (
- ("stdout: super-block backups (for fsck_ffs -b #) at:\n\n"),
- ("growfs: no room to allocate last cylinder group; "
- "leaving 364KB unused\n")
- )
- res = can_skip_resize(fs_type, resize_what, devpth)
- self.assertFalse(res)
-
- @mock.patch('cloudinit.subp.subp')
- def test_cannot_skip_ufs_growfs_exception(self, m_subp):
- fs_type = "ufs"
- resize_what = "/"
- devpth = "/dev/da0p2"
- err = "growfs: /dev/da0p2 is not clean - run fsck.\n"
- exception = ProcessExecutionError(stderr=err, exit_code=1)
- m_subp.side_effect = exception
- with self.assertRaises(ProcessExecutionError):
- can_skip_resize(fs_type, resize_what, devpth)
-
- def test_can_skip_resize_ext(self):
- self.assertFalse(can_skip_resize('ext', '/', '/dev/sda1'))
-
- def test_handle_noops_on_disabled(self):
- """The handle function logs when the configuration disables resize."""
- cfg = {'resizefs_enabled': False}
- handle('cc_resizefs_vyos', cfg, _cloud=None, log=LOG, args=[])
- self.assertIn(
- 'DEBUG: Skipping module named cc_resizefs_vyos, resizing disabled\n',
- self.logs.getvalue())
-
- @skipUnlessJsonSchema()
- def test_handle_schema_validation_logs_invalid_resize_enabled_value(self):
- """The handle reports json schema violations as a warning.
-
- Invalid values for resizefs_enabled result in disabling the module.
- """
- cfg = {'resizefs_enabled': 'junk'}
- handle('cc_resizefs_vyos', cfg, _cloud=None, log=LOG, args=[])
- logs = self.logs.getvalue()
- self.assertIn(
- "WARNING: Invalid config:\nresizefs_enabled: 'junk' is not one of"
- " [True, False, 'noblock']",
- logs)
- self.assertIn(
- 'DEBUG: Skipping module named cc_resizefs_vyos, resizing disabled\n',
- logs)
-
- @mock.patch('cloudinit.config.cc_resizefs_vyos.util.get_mount_info')
- def test_handle_warns_on_unknown_mount_info(self, m_get_mount_info):
- """handle warns when get_mount_info sees unknown filesystem for /."""
- m_get_mount_info.return_value = None
- cfg = {'resizefs_enabled': True}
- handle('cc_resizefs_vyos', cfg, _cloud=None, log=LOG, args=[])
- logs = self.logs.getvalue()
- self.assertNotIn("WARNING: Invalid config:\nresizefs_enabled:", logs)
- self.assertIn(
- 'WARNING: Could not determine filesystem type of /\n',
- logs)
- self.assertEqual(
- [mock.call('/', LOG)],
- m_get_mount_info.call_args_list)
-
- def test_handle_warns_on_undiscoverable_root_path_in_commandline(self):
- """handle noops when the root path is not found on the commandline."""
- cfg = {'resizefs_enabled': True}
- exists_mock_path = 'cloudinit.config.cc_resizefs_vyos.os.path.exists'
-
- def fake_mount_info(path, log):
- self.assertEqual('/', path)
- self.assertEqual(LOG, log)
- return ('/dev/root', 'ext4', '/')
-
- with mock.patch(exists_mock_path) as m_exists:
- m_exists.return_value = False
- wrap_and_call(
- 'cloudinit.config.cc_resizefs_vyos.util',
- {'is_container': {'return_value': False},
- 'get_mount_info': {'side_effect': fake_mount_info},
- 'get_cmdline': {'return_value': 'BOOT_IMAGE=/vmlinuz.efi'}},
- handle, 'cc_resizefs_vyos', cfg, _cloud=None, log=LOG,
- args=[])
- logs = self.logs.getvalue()
- self.assertIn("WARNING: Unable to find device '/dev/root'", logs)
-
- def test_resize_zfs_cmd_return(self):
- zpool = 'zroot'
- devpth = 'gpt/system'
- self.assertEqual(('zpool', 'online', '-e', zpool, devpth),
- _resize_zfs(zpool, devpth))
-
- def test_resize_xfs_cmd_return(self):
- mount_point = '/mnt/test'
- devpth = '/dev/sda1'
- self.assertEqual(('xfs_growfs', mount_point),
- _resize_xfs(mount_point, devpth))
-
- def test_resize_ext_cmd_return(self):
- mount_point = '/'
- devpth = '/dev/sdb1'
- self.assertEqual(('resize2fs', devpth),
- _resize_ext(mount_point, devpth))
-
- def test_resize_ufs_cmd_return(self):
- mount_point = '/'
- devpth = '/dev/sda2'
- self.assertEqual(('growfs', '-y', mount_point),
- _resize_ufs(mount_point, devpth))
-
- @mock.patch('cloudinit.util.is_container', return_value=False)
- @mock.patch('cloudinit.util.parse_mount')
- @mock.patch('cloudinit.util.get_device_info_from_zpool')
- @mock.patch('cloudinit.util.get_mount_info')
- def test_handle_zfs_root(self, mount_info, zpool_info, parse_mount,
- is_container):
- devpth = 'vmzroot/ROOT/freebsd'
- disk = 'gpt/system'
- fs_type = 'zfs'
- mount_point = '/'
-
- mount_info.return_value = (devpth, fs_type, mount_point)
- zpool_info.return_value = disk
- parse_mount.return_value = (devpth, fs_type, mount_point)
-
- cfg = {'resizefs_enabled': True}
-
- with mock.patch('cloudinit.config.cc_resizefs_vyos.do_resize') as dresize:
- handle('cc_resizefs_vyos', cfg, _cloud=None, log=LOG, args=[])
- ret = dresize.call_args[0][0]
-
- self.assertEqual(('zpool', 'online', '-e', 'vmzroot', disk), ret)
-
- @mock.patch('cloudinit.util.is_container', return_value=False)
- @mock.patch('cloudinit.util.get_mount_info')
- @mock.patch('cloudinit.util.get_device_info_from_zpool')
- @mock.patch('cloudinit.util.parse_mount')
- def test_handle_modern_zfsroot(self, mount_info, zpool_info, parse_mount,
- is_container):
- devpth = 'zroot/ROOT/default'
- disk = 'da0p3'
- fs_type = 'zfs'
- mount_point = '/'
-
- mount_info.return_value = (devpth, fs_type, mount_point)
- zpool_info.return_value = disk
- parse_mount.return_value = (devpth, fs_type, mount_point)
-
- cfg = {'resizefs_enabled': True}
-
- def fake_stat(devpath):
- if devpath == disk:
- raise OSError("not here")
- FakeStat = namedtuple(
- 'FakeStat', ['st_mode', 'st_size', 'st_mtime']) # minimal stat
-            return FakeStat(25008, 0, 1)  # fake block device
-
- with mock.patch('cloudinit.config.cc_resizefs_vyos.do_resize') as dresize:
- with mock.patch('cloudinit.config.cc_resizefs_vyos.os.stat') as m_stat:
- m_stat.side_effect = fake_stat
- handle('cc_resizefs_vyos', cfg, _cloud=None, log=LOG, args=[])
-
- self.assertEqual(('zpool', 'online', '-e', 'zroot', '/dev/' + disk),
- dresize.call_args[0][0])
-
-
-class TestRootDevFromCmdline(CiTestCase):
-
- def test_rootdev_from_cmdline_with_no_root(self):
- """Return None from rootdev_from_cmdline when root is not present."""
- invalid_cases = [
- 'BOOT_IMAGE=/adsf asdfa werasef root adf', 'BOOT_IMAGE=/adsf', '']
- for case in invalid_cases:
- self.assertIsNone(util.rootdev_from_cmdline(case))
-
- def test_rootdev_from_cmdline_with_root_startswith_dev(self):
- """Return the cmdline root when the path starts with /dev."""
- self.assertEqual(
- '/dev/this', util.rootdev_from_cmdline('asdf root=/dev/this'))
-
- def test_rootdev_from_cmdline_with_root_without_dev_prefix(self):
- """Add /dev prefix to cmdline root when the path lacks the prefix."""
- self.assertEqual(
- '/dev/this', util.rootdev_from_cmdline('asdf root=this'))
-
- def test_rootdev_from_cmdline_with_root_with_label(self):
- """When cmdline root contains a LABEL, our root is disk/by-label."""
- self.assertEqual(
- '/dev/disk/by-label/unique',
- util.rootdev_from_cmdline('asdf root=LABEL=unique'))
-
- def test_rootdev_from_cmdline_with_root_with_uuid(self):
- """When cmdline root contains a UUID, our root is disk/by-uuid."""
- self.assertEqual(
- '/dev/disk/by-uuid/adsfdsaf-adsf',
- util.rootdev_from_cmdline('asdf root=UUID=adsfdsaf-adsf'))
-
-
-class TestMaybeGetDevicePathAsWritableBlock(CiTestCase):
-
- with_logs = True
-
- def test_maybe_get_writable_device_path_none_on_overlayroot(self):
- """When devpath is overlayroot (on MAAS), is_dev_writable is False."""
- info = 'does not matter'
- devpath = wrap_and_call(
- 'cloudinit.config.cc_resizefs_vyos.util',
- {'is_container': {'return_value': False}},
- maybe_get_writable_device_path, 'overlayroot', info, LOG)
- self.assertIsNone(devpath)
- self.assertIn(
- "Not attempting to resize devpath 'overlayroot'",
- self.logs.getvalue())
-
- def test_maybe_get_writable_device_path_warns_missing_cmdline_root(self):
-        """When root is absent and not in the cmdline, log a warning."""
- info = 'does not matter'
-
- def fake_mount_info(path, log):
- self.assertEqual('/', path)
- self.assertEqual(LOG, log)
- return ('/dev/root', 'ext4', '/')
-
- exists_mock_path = 'cloudinit.config.cc_resizefs_vyos.os.path.exists'
- with mock.patch(exists_mock_path) as m_exists:
- m_exists.return_value = False
- devpath = wrap_and_call(
- 'cloudinit.config.cc_resizefs_vyos.util',
- {'is_container': {'return_value': False},
- 'get_mount_info': {'side_effect': fake_mount_info},
- 'get_cmdline': {'return_value': 'BOOT_IMAGE=/vmlinuz.efi'}},
- maybe_get_writable_device_path, '/dev/root', info, LOG)
- self.assertIsNone(devpath)
- logs = self.logs.getvalue()
- self.assertIn("WARNING: Unable to find device '/dev/root'", logs)
-
- def test_maybe_get_writable_device_path_does_not_exist(self):
- """When devpath does not exist, a warning is logged."""
- info = 'dev=/dev/I/dont/exist mnt_point=/ path=/dev/none'
- devpath = wrap_and_call(
- 'cloudinit.config.cc_resizefs_vyos.util',
- {'is_container': {'return_value': False}},
- maybe_get_writable_device_path, '/dev/I/dont/exist', info, LOG)
- self.assertIsNone(devpath)
- self.assertIn(
- "WARNING: Device '/dev/I/dont/exist' did not exist."
- ' cannot resize: %s' % info,
- self.logs.getvalue())
-
- def test_maybe_get_writable_device_path_does_not_exist_in_container(self):
- """When devpath does not exist in a container, log a debug message."""
- info = 'dev=/dev/I/dont/exist mnt_point=/ path=/dev/none'
- devpath = wrap_and_call(
- 'cloudinit.config.cc_resizefs_vyos.util',
- {'is_container': {'return_value': True}},
- maybe_get_writable_device_path, '/dev/I/dont/exist', info, LOG)
- self.assertIsNone(devpath)
- self.assertIn(
- "DEBUG: Device '/dev/I/dont/exist' did not exist in container."
- ' cannot resize: %s' % info,
- self.logs.getvalue())
-
- def test_maybe_get_writable_device_path_raises_oserror(self):
-        """When os.stat raises an unexpected OSError, it is reraised."""
- info = 'dev=/dev/I/dont/exist mnt_point=/ path=/dev/none'
- with self.assertRaises(OSError) as context_manager:
- wrap_and_call(
- 'cloudinit.config.cc_resizefs_vyos',
- {'util.is_container': {'return_value': True},
- 'os.stat': {'side_effect': OSError('Something unexpected')}},
- maybe_get_writable_device_path, '/dev/I/dont/exist', info, LOG)
- self.assertEqual(
- 'Something unexpected', str(context_manager.exception))
-
- def test_maybe_get_writable_device_path_non_block(self):
-        """When device is not a block device, warn and return None."""
- fake_devpath = self.tmp_path('dev/readwrite')
- util.write_file(fake_devpath, '', mode=0o600) # read-write
- info = 'dev=/dev/root mnt_point=/ path={0}'.format(fake_devpath)
-
- devpath = wrap_and_call(
- 'cloudinit.config.cc_resizefs_vyos.util',
- {'is_container': {'return_value': False}},
- maybe_get_writable_device_path, fake_devpath, info, LOG)
- self.assertIsNone(devpath)
- self.assertIn(
- "WARNING: device '{0}' not a block device. cannot resize".format(
- fake_devpath),
- self.logs.getvalue())
-
- def test_maybe_get_writable_device_path_non_block_on_container(self):
-        """When device is a non-block device in a container, emit debug log."""
- fake_devpath = self.tmp_path('dev/readwrite')
- util.write_file(fake_devpath, '', mode=0o600) # read-write
- info = 'dev=/dev/root mnt_point=/ path={0}'.format(fake_devpath)
-
- devpath = wrap_and_call(
- 'cloudinit.config.cc_resizefs_vyos.util',
- {'is_container': {'return_value': True}},
- maybe_get_writable_device_path, fake_devpath, info, LOG)
- self.assertIsNone(devpath)
- self.assertIn(
- "DEBUG: device '{0}' not a block device in container."
- ' cannot resize'.format(fake_devpath),
- self.logs.getvalue())
-
- def test_maybe_get_writable_device_path_returns_cmdline_root(self):
- """When root device is UUID in kernel commandline, update devpath."""
- # XXX Long-term we want to use FilesystemMocking test to avoid
- # touching os.stat.
- FakeStat = namedtuple(
- 'FakeStat', ['st_mode', 'st_size', 'st_mtime']) # minimal def.
- info = 'dev=/dev/root mnt_point=/ path=/does/not/matter'
- devpath = wrap_and_call(
- 'cloudinit.config.cc_resizefs_vyos',
- {'util.get_cmdline': {'return_value': 'asdf root=UUID=my-uuid'},
- 'util.is_container': False,
- 'os.path.exists': False, # /dev/root doesn't exist
- 'os.stat': {
- 'return_value': FakeStat(25008, 0, 1)} # char block device
- },
- maybe_get_writable_device_path, '/dev/root', info, LOG)
- self.assertEqual('/dev/disk/by-uuid/my-uuid', devpath)
- self.assertIn(
- "DEBUG: Converted /dev/root to '/dev/disk/by-uuid/my-uuid'"
- " per kernel cmdline",
- self.logs.getvalue())
-
- @mock.patch('cloudinit.util.mount_is_read_write')
- @mock.patch('cloudinit.config.cc_resizefs_vyos.os.path.isdir')
- def test_resize_btrfs_mount_is_ro(self, m_is_dir, m_is_rw):
- """Do not resize / directly if it is read-only. (LP: #1734787)."""
- m_is_rw.return_value = False
- m_is_dir.return_value = True
- self.assertEqual(
- ('btrfs', 'filesystem', 'resize', 'max', '//.snapshots'),
- _resize_btrfs("/", "/dev/sda1"))
-
- @mock.patch('cloudinit.util.mount_is_read_write')
- @mock.patch('cloudinit.config.cc_resizefs_vyos.os.path.isdir')
- def test_resize_btrfs_mount_is_rw(self, m_is_dir, m_is_rw):
-        """Resize / directly when it is read-write. (LP: #1734787)."""
- m_is_rw.return_value = True
- m_is_dir.return_value = True
- self.assertEqual(
- ('btrfs', 'filesystem', 'resize', 'max', '/'),
- _resize_btrfs("/", "/dev/sda1"))
-
- @mock.patch('cloudinit.util.is_container', return_value=True)
- @mock.patch('cloudinit.util.is_FreeBSD')
- def test_maybe_get_writable_device_path_zfs_freebsd(self, freebsd,
- m_is_container):
- freebsd.return_value = True
- info = 'dev=gpt/system mnt_point=/ path=/'
- devpth = maybe_get_writable_device_path('gpt/system', info, LOG)
- self.assertEqual('gpt/system', devpth)
-
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_runcmd.py b/tests/unittests/test_handler/test_handler_runcmd.py
deleted file mode 100644
index 73237d68..00000000
--- a/tests/unittests/test_handler/test_handler_runcmd.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.config.cc_runcmd import handle, schema
-from cloudinit.sources import DataSourceNone
-from cloudinit import (distros, helpers, cloud, subp, util)
-from cloudinit.tests.helpers import (
- CiTestCase, FilesystemMockingTestCase, SchemaTestCaseMixin,
- skipUnlessJsonSchema)
-
-import logging
-import os
-import stat
-
-LOG = logging.getLogger(__name__)
-
-
-class TestRuncmd(FilesystemMockingTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestRuncmd, self).setUp()
- self.subp = subp.subp
- self.new_root = self.tmp_dir()
-
- def _get_cloud(self, distro):
- self.patchUtils(self.new_root)
- paths = helpers.Paths({'scripts': self.new_root})
- cls = distros.fetch(distro)
- mydist = cls(distro, {}, paths)
- myds = DataSourceNone.DataSourceNone({}, mydist, paths)
- paths.datasource = myds
- return cloud.Cloud(myds, paths, {}, mydist, None)
-
- def test_handler_skip_if_no_runcmd(self):
- """When the provided config doesn't contain runcmd, skip it."""
- cfg = {}
- mycloud = self._get_cloud('ubuntu')
- handle('notimportant', cfg, mycloud, LOG, None)
- self.assertIn(
- "Skipping module named notimportant, no 'runcmd' key",
- self.logs.getvalue())
-
- def test_handler_invalid_command_set(self):
-        """Commands which can't be converted to shell are reported as failures."""
- invalid_config = {'runcmd': 1}
- cc = self._get_cloud('ubuntu')
- handle('cc_runcmd', invalid_config, cc, LOG, [])
- self.assertIn(
- 'Failed to shellify 1 into file'
- ' /var/lib/cloud/instances/iid-datasource-none/scripts/runcmd',
- self.logs.getvalue())
-
- @skipUnlessJsonSchema()
- def test_handler_schema_validation_warns_non_array_type(self):
- """Schema validation warns of non-array type for runcmd key.
-
- Schema validation is not strict, so runcmd attempts to shellify the
- invalid content.
- """
- invalid_config = {'runcmd': 1}
- cc = self._get_cloud('ubuntu')
- handle('cc_runcmd', invalid_config, cc, LOG, [])
- self.assertIn(
- 'Invalid config:\nruncmd: 1 is not of type \'array\'',
- self.logs.getvalue())
- self.assertIn('Failed to shellify', self.logs.getvalue())
-
- @skipUnlessJsonSchema()
- def test_handler_schema_validation_warns_non_array_item_type(self):
- """Schema validation warns of non-array or string runcmd items.
-
- Schema validation is not strict, so runcmd attempts to shellify the
- invalid content.
- """
- invalid_config = {
- 'runcmd': ['ls /', 20, ['wget', 'http://stuff/blah'], {'a': 'n'}]}
- cc = self._get_cloud('ubuntu')
- handle('cc_runcmd', invalid_config, cc, LOG, [])
- expected_warnings = [
- 'runcmd.1: 20 is not valid under any of the given schemas',
- 'runcmd.3: {\'a\': \'n\'} is not valid under any of the given'
- ' schema'
- ]
- logs = self.logs.getvalue()
- for warning in expected_warnings:
- self.assertIn(warning, logs)
- self.assertIn('Failed to shellify', logs)
-
- def test_handler_write_valid_runcmd_schema_to_file(self):
- """Valid runcmd schema is written to a runcmd shell script."""
- valid_config = {'runcmd': [['ls', '/']]}
- cc = self._get_cloud('ubuntu')
- handle('cc_runcmd', valid_config, cc, LOG, [])
- runcmd_file = os.path.join(
- self.new_root,
- 'var/lib/cloud/instances/iid-datasource-none/scripts/runcmd')
- self.assertEqual("#!/bin/sh\n'ls' '/'\n", util.load_file(runcmd_file))
- file_stat = os.stat(runcmd_file)
- self.assertEqual(0o700, stat.S_IMODE(file_stat.st_mode))
-
-
-@skipUnlessJsonSchema()
-class TestSchema(CiTestCase, SchemaTestCaseMixin):
- """Directly test schema rather than through handle."""
-
- schema = schema
-
- def test_duplicates_are_fine_array_array(self):
- """Duplicated commands array/array entries are allowed."""
- self.assertSchemaValid(
- [["echo", "bye"], ["echo", "bye"]],
- "command entries can be duplicate.")
-
- def test_duplicates_are_fine_array_string(self):
- """Duplicated commands array/string entries are allowed."""
- self.assertSchemaValid(
- ["echo bye", "echo bye"],
- "command entries can be duplicate.")
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_seed_random.py b/tests/unittests/test_handler/test_handler_seed_random.py
deleted file mode 100644
index 85167f19..00000000
--- a/tests/unittests/test_handler/test_handler_seed_random.py
+++ /dev/null
@@ -1,221 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# Based on test_handler_set_hostname.py
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.config import cc_seed_random
-
-import gzip
-import tempfile
-from io import BytesIO
-
-from cloudinit import cloud
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import subp
-from cloudinit import util
-
-from cloudinit.sources import DataSourceNone
-
-from cloudinit.tests import helpers as t_help
-
-import logging
-
-LOG = logging.getLogger(__name__)
-
-
-class TestRandomSeed(t_help.TestCase):
- def setUp(self):
- super(TestRandomSeed, self).setUp()
- self._seed_file = tempfile.mktemp()
- self.unapply = []
-
- # by default 'which' has nothing in its path
- self.apply_patches([(subp, 'which', self._which)])
- self.apply_patches([(subp, 'subp', self._subp)])
- self.subp_called = []
- self.whichdata = {}
-
- def tearDown(self):
- apply_patches([i for i in reversed(self.unapply)])
- util.del_file(self._seed_file)
-
- def apply_patches(self, patches):
- ret = apply_patches(patches)
- self.unapply += ret
-
- def _which(self, program):
- return self.whichdata.get(program)
-
- def _subp(self, *args, **kwargs):
- # supports subp calling with cmd as args or kwargs
- if 'args' not in kwargs:
- kwargs['args'] = args[0]
- self.subp_called.append(kwargs)
- return
-
- def _compress(self, text):
- contents = BytesIO()
- gz_fh = gzip.GzipFile(mode='wb', fileobj=contents)
- gz_fh.write(text)
- gz_fh.close()
- return contents.getvalue()
-
- def _get_cloud(self, distro, metadata=None):
- paths = helpers.Paths({})
- cls = distros.fetch(distro)
- ubuntu_distro = cls(distro, {}, paths)
- ds = DataSourceNone.DataSourceNone({}, ubuntu_distro, paths)
- if metadata:
- ds.metadata = metadata
- return cloud.Cloud(ds, paths, {}, ubuntu_distro, None)
-
- def test_append_random(self):
- cfg = {
- 'random_seed': {
- 'file': self._seed_file,
- 'data': 'tiny-tim-was-here',
- }
- }
- cc_seed_random.handle('test', cfg, self._get_cloud('ubuntu'), LOG, [])
- contents = util.load_file(self._seed_file)
- self.assertEqual("tiny-tim-was-here", contents)
-
- def test_append_random_unknown_encoding(self):
- data = self._compress(b"tiny-toe")
- cfg = {
- 'random_seed': {
- 'file': self._seed_file,
- 'data': data,
- 'encoding': 'special_encoding',
- }
- }
- self.assertRaises(IOError, cc_seed_random.handle, 'test', cfg,
- self._get_cloud('ubuntu'), LOG, [])
-
- def test_append_random_gzip(self):
- data = self._compress(b"tiny-toe")
- cfg = {
- 'random_seed': {
- 'file': self._seed_file,
- 'data': data,
- 'encoding': 'gzip',
- }
- }
- cc_seed_random.handle('test', cfg, self._get_cloud('ubuntu'), LOG, [])
- contents = util.load_file(self._seed_file)
- self.assertEqual("tiny-toe", contents)
-
- def test_append_random_gz(self):
- data = self._compress(b"big-toe")
- cfg = {
- 'random_seed': {
- 'file': self._seed_file,
- 'data': data,
- 'encoding': 'gz',
- }
- }
- cc_seed_random.handle('test', cfg, self._get_cloud('ubuntu'), LOG, [])
- contents = util.load_file(self._seed_file)
- self.assertEqual("big-toe", contents)
-
- def test_append_random_base64(self):
- data = util.b64e('bubbles')
- cfg = {
- 'random_seed': {
- 'file': self._seed_file,
- 'data': data,
- 'encoding': 'base64',
- }
- }
- cc_seed_random.handle('test', cfg, self._get_cloud('ubuntu'), LOG, [])
- contents = util.load_file(self._seed_file)
- self.assertEqual("bubbles", contents)
-
- def test_append_random_b64(self):
- data = util.b64e('kit-kat')
- cfg = {
- 'random_seed': {
- 'file': self._seed_file,
- 'data': data,
- 'encoding': 'b64',
- }
- }
- cc_seed_random.handle('test', cfg, self._get_cloud('ubuntu'), LOG, [])
- contents = util.load_file(self._seed_file)
- self.assertEqual("kit-kat", contents)
-
- def test_append_random_metadata(self):
- cfg = {
- 'random_seed': {
- 'file': self._seed_file,
- 'data': 'tiny-tim-was-here',
- }
- }
- c = self._get_cloud('ubuntu', {'random_seed': '-so-was-josh'})
- cc_seed_random.handle('test', cfg, c, LOG, [])
- contents = util.load_file(self._seed_file)
- self.assertEqual('tiny-tim-was-here-so-was-josh', contents)
-
- def test_seed_command_provided_and_available(self):
- c = self._get_cloud('ubuntu', {})
- self.whichdata = {'pollinate': '/usr/bin/pollinate'}
- cfg = {'random_seed': {'command': ['pollinate', '-q']}}
- cc_seed_random.handle('test', cfg, c, LOG, [])
-
- subp_args = [f['args'] for f in self.subp_called]
- self.assertIn(['pollinate', '-q'], subp_args)
-
- def test_seed_command_not_provided(self):
- c = self._get_cloud('ubuntu', {})
- self.whichdata = {}
- cc_seed_random.handle('test', {}, c, LOG, [])
-
- # subp should not have been called as which would say not available
- self.assertFalse(self.subp_called)
-
- def test_unavailable_seed_command_and_required_raises_error(self):
- c = self._get_cloud('ubuntu', {})
- self.whichdata = {}
- cfg = {'random_seed': {'command': ['THIS_NO_COMMAND'],
- 'command_required': True}}
- self.assertRaises(ValueError, cc_seed_random.handle,
- 'test', cfg, c, LOG, [])
-
- def test_seed_command_and_required(self):
- c = self._get_cloud('ubuntu', {})
- self.whichdata = {'foo': 'foo'}
- cfg = {'random_seed': {'command_required': True, 'command': ['foo']}}
- cc_seed_random.handle('test', cfg, c, LOG, [])
-
- self.assertIn(['foo'], [f['args'] for f in self.subp_called])
-
- def test_file_in_environment_for_command(self):
- c = self._get_cloud('ubuntu', {})
- self.whichdata = {'foo': 'foo'}
- cfg = {'random_seed': {'command_required': True, 'command': ['foo'],
- 'file': self._seed_file}}
- cc_seed_random.handle('test', cfg, c, LOG, [])
-
-        # this just insists that the first time subp was called,
- # RANDOM_SEED_FILE was in the environment set up correctly
- subp_env = [f['env'] for f in self.subp_called]
- self.assertEqual(subp_env[0].get('RANDOM_SEED_FILE'), self._seed_file)
-
-
-def apply_patches(patches):
- ret = []
- for (ref, name, replace) in patches:
- if replace is None:
- continue
- orig = getattr(ref, name)
- setattr(ref, name, replace)
- ret.append((ref, name, orig))
- return ret
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_set_hostname.py b/tests/unittests/test_handler/test_handler_set_hostname.py
deleted file mode 100644
index 58abf51a..00000000
--- a/tests/unittests/test_handler/test_handler_set_hostname.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.config import cc_set_hostname
-
-from cloudinit import cloud
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import util
-
-from cloudinit.tests import helpers as t_help
-
-from configobj import ConfigObj
-import logging
-import os
-import shutil
-import tempfile
-from io import BytesIO
-
-LOG = logging.getLogger(__name__)
-
-
-class TestHostname(t_help.FilesystemMockingTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestHostname, self).setUp()
- self.tmp = tempfile.mkdtemp()
- util.ensure_dir(os.path.join(self.tmp, 'data'))
- self.addCleanup(shutil.rmtree, self.tmp)
-
- def _fetch_distro(self, kind):
- cls = distros.fetch(kind)
- paths = helpers.Paths({'cloud_dir': self.tmp})
- return cls(kind, {}, paths)
-
- def test_write_hostname_rhel(self):
- cfg = {
- 'hostname': 'blah.blah.blah.yahoo.com',
- }
- distro = self._fetch_distro('rhel')
- paths = helpers.Paths({'cloud_dir': self.tmp})
- ds = None
- cc = cloud.Cloud(ds, paths, {}, distro, None)
- self.patchUtils(self.tmp)
- cc_set_hostname.handle('cc_set_hostname',
- cfg, cc, LOG, [])
- if not distro.uses_systemd():
- contents = util.load_file("/etc/sysconfig/network", decode=False)
- n_cfg = ConfigObj(BytesIO(contents))
- self.assertEqual({'HOSTNAME': 'blah.blah.blah.yahoo.com'},
- dict(n_cfg))
-
- def test_write_hostname_debian(self):
- cfg = {
- 'hostname': 'blah.blah.blah.yahoo.com',
- }
- distro = self._fetch_distro('debian')
- paths = helpers.Paths({'cloud_dir': self.tmp})
- ds = None
- cc = cloud.Cloud(ds, paths, {}, distro, None)
- self.patchUtils(self.tmp)
- cc_set_hostname.handle('cc_set_hostname',
- cfg, cc, LOG, [])
- contents = util.load_file("/etc/hostname")
- self.assertEqual('blah', contents.strip())
-
- def test_write_hostname_sles(self):
- cfg = {
- 'hostname': 'blah.blah.blah.suse.com',
- }
- distro = self._fetch_distro('sles')
- paths = helpers.Paths({'cloud_dir': self.tmp})
- ds = None
- cc = cloud.Cloud(ds, paths, {}, distro, None)
- self.patchUtils(self.tmp)
- cc_set_hostname.handle('cc_set_hostname', cfg, cc, LOG, [])
- if not distro.uses_systemd():
- contents = util.load_file(distro.hostname_conf_fn)
- self.assertEqual('blah', contents.strip())
-
- def test_multiple_calls_skips_unchanged_hostname(self):
- """Only new hostname or fqdn values will generate a hostname call."""
- distro = self._fetch_distro('debian')
- paths = helpers.Paths({'cloud_dir': self.tmp})
- ds = None
- cc = cloud.Cloud(ds, paths, {}, distro, None)
- self.patchUtils(self.tmp)
- cc_set_hostname.handle(
- 'cc_set_hostname', {'hostname': 'hostname1.me.com'}, cc, LOG, [])
- contents = util.load_file("/etc/hostname")
- self.assertEqual('hostname1', contents.strip())
- cc_set_hostname.handle(
- 'cc_set_hostname', {'hostname': 'hostname1.me.com'}, cc, LOG, [])
- self.assertIn(
- 'DEBUG: No hostname changes. Skipping set-hostname\n',
- self.logs.getvalue())
- cc_set_hostname.handle(
- 'cc_set_hostname', {'hostname': 'hostname2.me.com'}, cc, LOG, [])
- contents = util.load_file("/etc/hostname")
- self.assertEqual('hostname2', contents.strip())
- self.assertIn(
- 'Non-persistently setting the system hostname to hostname2',
- self.logs.getvalue())
-
- def test_error_on_distro_set_hostname_errors(self):
- """Raise SetHostnameError on exceptions from distro.set_hostname."""
- distro = self._fetch_distro('debian')
-
- def set_hostname_error(hostname, fqdn):
- raise Exception("OOPS on: %s" % fqdn)
-
- distro.set_hostname = set_hostname_error
- paths = helpers.Paths({'cloud_dir': self.tmp})
- ds = None
- cc = cloud.Cloud(ds, paths, {}, distro, None)
- self.patchUtils(self.tmp)
- with self.assertRaises(cc_set_hostname.SetHostnameError) as ctx_mgr:
- cc_set_hostname.handle(
- 'somename', {'hostname': 'hostname1.me.com'}, cc, LOG, [])
- self.assertEqual(
- 'Failed to set the hostname to hostname1.me.com (hostname1):'
- ' OOPS on: hostname1.me.com',
- str(ctx_mgr.exception))
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_yum_add_repo.py b/tests/unittests/test_handler/test_handler_yum_add_repo.py
deleted file mode 100644
index 7c61bbf9..00000000
--- a/tests/unittests/test_handler/test_handler_yum_add_repo.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import configparser
-import logging
-import shutil
-import tempfile
-
-from cloudinit import util
-from cloudinit.config import cc_yum_add_repo
-from cloudinit.tests import helpers
-
-LOG = logging.getLogger(__name__)
-
-
-class TestConfig(helpers.FilesystemMockingTestCase):
- def setUp(self):
- super(TestConfig, self).setUp()
- self.tmp = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.tmp)
-
- def test_bad_config(self):
- cfg = {
- 'yum_repos': {
- 'epel-testing': {
- 'name': 'Extra Packages for Enterprise Linux 5 - Testing',
- # Missing this should cause the repo not to be written
- # 'baseurl': 'http://blah.org/pub/epel/testing/5/$barch',
- 'enabled': False,
- 'gpgcheck': True,
- 'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL',
- 'failovermethod': 'priority',
- },
- },
- }
- self.patchUtils(self.tmp)
- cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, [])
- self.assertRaises(IOError, util.load_file,
- "/etc/yum.repos.d/epel_testing.repo")
-
- def test_write_config(self):
- cfg = {
- 'yum_repos': {
- 'epel-testing': {
- 'name': 'Extra Packages for Enterprise Linux 5 - Testing',
- 'baseurl': 'http://blah.org/pub/epel/testing/5/$basearch',
- 'enabled': False,
- 'gpgcheck': True,
- 'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL',
- 'failovermethod': 'priority',
- },
- },
- }
- self.patchUtils(self.tmp)
- cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, [])
- contents = util.load_file("/etc/yum.repos.d/epel_testing.repo")
- parser = configparser.ConfigParser()
- parser.read_string(contents)
- expected = {
- 'epel_testing': {
- 'name': 'Extra Packages for Enterprise Linux 5 - Testing',
- 'failovermethod': 'priority',
- 'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL',
- 'enabled': '0',
- 'baseurl': 'http://blah.org/pub/epel/testing/5/$basearch',
- 'gpgcheck': '1',
- }
- }
- for section in expected:
- self.assertTrue(parser.has_section(section),
- "Contains section {0}".format(section))
- for k, v in expected[section].items():
- self.assertEqual(parser.get(section, k), v)
-
- def test_write_config_array(self):
- cfg = {
- 'yum_repos': {
- 'puppetlabs-products': {
- 'name': 'Puppet Labs Products El 6 - $basearch',
- 'baseurl':
- 'http://yum.puppetlabs.com/el/6/products/$basearch',
- 'gpgkey': [
- 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppetlabs',
- 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppet',
- ],
- 'enabled': True,
- 'gpgcheck': True,
- }
- }
- }
- self.patchUtils(self.tmp)
- cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, [])
- contents = util.load_file("/etc/yum.repos.d/puppetlabs_products.repo")
- parser = configparser.ConfigParser()
- parser.read_string(contents)
- expected = {
- 'puppetlabs_products': {
- 'name': 'Puppet Labs Products El 6 - $basearch',
- 'baseurl': 'http://yum.puppetlabs.com/el/6/products/$basearch',
- 'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppetlabs\n'
- 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppet',
- 'enabled': '1',
- 'gpgcheck': '1',
- }
- }
- for section in expected:
- self.assertTrue(parser.has_section(section),
- "Contains section {0}".format(section))
- for k, v in expected[section].items():
- self.assertEqual(parser.get(section, k), v)
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_schema.py b/tests/unittests/test_handler/test_schema.py
deleted file mode 100644
index 15aa77bb..00000000
--- a/tests/unittests/test_handler/test_schema.py
+++ /dev/null
@@ -1,554 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-import cloudinit
-from cloudinit.config.schema import (
- CLOUD_CONFIG_HEADER, SchemaValidationError, annotated_cloudconfig_file,
- get_schema_doc, get_schema, validate_cloudconfig_file,
- validate_cloudconfig_schema, main)
-from cloudinit.util import write_file
-
-from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJsonSchema
-
-from copy import copy
-import itertools
-import os
-import pytest
-from pathlib import Path
-from textwrap import dedent
-from yaml import safe_load
-
-
-class GetSchemaTest(CiTestCase):
-
- def test_get_schema_coalesces_known_schema(self):
- """Every cloudconfig module with schema is listed in allOf keyword."""
- schema = get_schema()
- self.assertCountEqual(
- [
- 'cc_apk_configure',
- 'cc_apt_configure',
- 'cc_bootcmd',
- 'cc_locale',
- 'cc_ntp',
- 'cc_resizefs',
- 'cc_runcmd',
- 'cc_snap',
- 'cc_ubuntu_advantage',
- 'cc_ubuntu_drivers',
- 'cc_write_files',
- 'cc_zypper_add_repo',
- 'cc_chef'
- ],
- [subschema['id'] for subschema in schema['allOf']])
- self.assertEqual('cloud-config-schema', schema['id'])
- self.assertEqual(
- 'http://json-schema.org/draft-04/schema#',
- schema['$schema'])
- # FULL_SCHEMA is updated by the get_schema call
- from cloudinit.config.schema import FULL_SCHEMA
- self.assertCountEqual(['id', '$schema', 'allOf'], FULL_SCHEMA.keys())
-
- def test_get_schema_returns_global_when_set(self):
- """When FULL_SCHEMA global is already set, get_schema returns it."""
- m_schema_path = 'cloudinit.config.schema.FULL_SCHEMA'
- with mock.patch(m_schema_path, {'here': 'iam'}):
- self.assertEqual({'here': 'iam'}, get_schema())
-
-
-class SchemaValidationErrorTest(CiTestCase):
- """Test validate_cloudconfig_schema"""
-
- def test_schema_validation_error_expects_schema_errors(self):
- """SchemaValidationError is initialized from schema_errors."""
- errors = (('key.path', 'unexpected key "junk"'),
- ('key2.path', '"-123" is not a valid "hostname" format'))
- exception = SchemaValidationError(schema_errors=errors)
- self.assertIsInstance(exception, Exception)
- self.assertEqual(exception.schema_errors, errors)
- self.assertEqual(
- 'Cloud config schema errors: key.path: unexpected key "junk", '
- 'key2.path: "-123" is not a valid "hostname" format',
- str(exception))
- self.assertTrue(isinstance(exception, ValueError))
-
-
-class ValidateCloudConfigSchemaTest(CiTestCase):
- """Tests for validate_cloudconfig_schema."""
-
- with_logs = True
-
- @skipUnlessJsonSchema()
- def test_validateconfig_schema_non_strict_emits_warnings(self):
- """When strict is False validate_cloudconfig_schema emits warnings."""
- schema = {'properties': {'p1': {'type': 'string'}}}
- validate_cloudconfig_schema({'p1': -1}, schema, strict=False)
- self.assertIn(
- "Invalid config:\np1: -1 is not of type 'string'\n",
- self.logs.getvalue())
-
- @skipUnlessJsonSchema()
- def test_validateconfig_schema_emits_warning_on_missing_jsonschema(self):
- """Warning from validate_cloudconfig_schema when missing jsonschema."""
- schema = {'properties': {'p1': {'type': 'string'}}}
- with mock.patch.dict('sys.modules', **{'jsonschema': ImportError()}):
- validate_cloudconfig_schema({'p1': -1}, schema, strict=True)
- self.assertIn(
- 'Ignoring schema validation. python-jsonschema is not present',
- self.logs.getvalue())
-
- @skipUnlessJsonSchema()
- def test_validateconfig_schema_strict_raises_errors(self):
- """When strict is True validate_cloudconfig_schema raises errors."""
- schema = {'properties': {'p1': {'type': 'string'}}}
- with self.assertRaises(SchemaValidationError) as context_mgr:
- validate_cloudconfig_schema({'p1': -1}, schema, strict=True)
- self.assertEqual(
- "Cloud config schema errors: p1: -1 is not of type 'string'",
- str(context_mgr.exception))
-
- @skipUnlessJsonSchema()
- def test_validateconfig_schema_honors_formats(self):
- """With strict True, validate_cloudconfig_schema errors on format."""
- schema = {
- 'properties': {'p1': {'type': 'string', 'format': 'hostname'}}}
- with self.assertRaises(SchemaValidationError) as context_mgr:
- validate_cloudconfig_schema({'p1': '-1'}, schema, strict=True)
- self.assertEqual(
- "Cloud config schema errors: p1: '-1' is not a 'hostname'",
- str(context_mgr.exception))
-
-
-class TestCloudConfigExamples:
- schema = get_schema()
- params = [
- (schema["id"], example)
- for schema in schema["allOf"] for example in schema["examples"]]
-
- @pytest.mark.parametrize("schema_id,example", params)
- @skipUnlessJsonSchema()
- def test_validateconfig_schema_of_example(self, schema_id, example):
- """ For a given example in a config module we test if it is valid
- according to the unified schema of all config modules
- """
- config_load = safe_load(example)
- validate_cloudconfig_schema(
- config_load, self.schema, strict=True)
-
-
-class ValidateCloudConfigFileTest(CiTestCase):
- """Tests for validate_cloudconfig_file."""
-
- def setUp(self):
- super(ValidateCloudConfigFileTest, self).setUp()
- self.config_file = self.tmp_path('cloudcfg.yaml')
-
- def test_validateconfig_file_error_on_absent_file(self):
- """On absent config_path, validate_cloudconfig_file errors."""
- with self.assertRaises(RuntimeError) as context_mgr:
- validate_cloudconfig_file('/not/here', {})
- self.assertEqual(
- 'Configfile /not/here does not exist',
- str(context_mgr.exception))
-
- def test_validateconfig_file_error_on_invalid_header(self):
- """On invalid header, validate_cloudconfig_file errors.
-
- A SchemaValidationError is raised when the file doesn't begin with
- CLOUD_CONFIG_HEADER.
- """
- write_file(self.config_file, '#junk')
- with self.assertRaises(SchemaValidationError) as context_mgr:
- validate_cloudconfig_file(self.config_file, {})
- self.assertEqual(
- 'Cloud config schema errors: format-l1.c1: File {0} needs to begin'
- ' with "{1}"'.format(
- self.config_file, CLOUD_CONFIG_HEADER.decode()),
- str(context_mgr.exception))
-
- def test_validateconfig_file_error_on_non_yaml_scanner_error(self):
- """On non-yaml scan issues, validate_cloudconfig_file errors."""
- # Generate a scanner error by providing text on a single line with
- # improper indent.
- write_file(self.config_file, '#cloud-config\nasdf:\nasdf')
- with self.assertRaises(SchemaValidationError) as context_mgr:
- validate_cloudconfig_file(self.config_file, {})
- self.assertIn(
- 'schema errors: format-l3.c1: File {0} is not valid yaml.'.format(
- self.config_file),
- str(context_mgr.exception))
-
- def test_validateconfig_file_error_on_non_yaml_parser_error(self):
- """On non-yaml parser issues, validate_cloudconfig_file errors."""
- write_file(self.config_file, '#cloud-config\n{}}')
- with self.assertRaises(SchemaValidationError) as context_mgr:
- validate_cloudconfig_file(self.config_file, {})
- self.assertIn(
- 'schema errors: format-l2.c3: File {0} is not valid yaml.'.format(
- self.config_file),
- str(context_mgr.exception))
-
- @skipUnlessJsonSchema()
- def test_validateconfig_file_sctrictly_validates_schema(self):
- """validate_cloudconfig_file raises errors on invalid schema."""
- schema = {
- 'properties': {'p1': {'type': 'string', 'format': 'hostname'}}}
- write_file(self.config_file, '#cloud-config\np1: "-1"')
- with self.assertRaises(SchemaValidationError) as context_mgr:
- validate_cloudconfig_file(self.config_file, schema)
- self.assertEqual(
- "Cloud config schema errors: p1: '-1' is not a 'hostname'",
- str(context_mgr.exception))
-
-
-class GetSchemaDocTest(CiTestCase):
- """Tests for get_schema_doc."""
-
- def setUp(self):
- super(GetSchemaDocTest, self).setUp()
- self.required_schema = {
- 'title': 'title', 'description': 'description', 'id': 'id',
- 'name': 'name', 'frequency': 'frequency',
- 'distros': ['debian', 'rhel']}
-
- def test_get_schema_doc_returns_restructured_text(self):
- """get_schema_doc returns restructured text for a cloudinit schema."""
- full_schema = copy(self.required_schema)
- full_schema.update(
- {'properties': {
- 'prop1': {'type': 'array', 'description': 'prop-description',
- 'items': {'type': 'integer'}}}})
- self.assertEqual(
- dedent("""
- name
- ----
- **Summary:** title
-
- description
-
- **Internal name:** ``id``
-
- **Module frequency:** frequency
-
- **Supported distros:** debian, rhel
-
- **Config schema**:
- **prop1:** (array of integer) prop-description\n\n"""),
- get_schema_doc(full_schema))
-
- def test_get_schema_doc_handles_multiple_types(self):
- """get_schema_doc delimits multiple property types with a '/'."""
- full_schema = copy(self.required_schema)
- full_schema.update(
- {'properties': {
- 'prop1': {'type': ['string', 'integer'],
- 'description': 'prop-description'}}})
- self.assertIn(
- '**prop1:** (string/integer) prop-description',
- get_schema_doc(full_schema))
-
- def test_get_schema_doc_handles_enum_types(self):
- """get_schema_doc converts enum types to yaml and delimits with '/'."""
- full_schema = copy(self.required_schema)
- full_schema.update(
- {'properties': {
- 'prop1': {'enum': [True, False, 'stuff'],
- 'description': 'prop-description'}}})
- self.assertIn(
- '**prop1:** (true/false/stuff) prop-description',
- get_schema_doc(full_schema))
-
- def test_get_schema_doc_handles_nested_oneof_property_types(self):
- """get_schema_doc describes array items oneOf declarations in type."""
- full_schema = copy(self.required_schema)
- full_schema.update(
- {'properties': {
- 'prop1': {'type': 'array',
- 'items': {
- 'oneOf': [{'type': 'string'},
- {'type': 'integer'}]},
- 'description': 'prop-description'}}})
- self.assertIn(
- '**prop1:** (array of (string)/(integer)) prop-description',
- get_schema_doc(full_schema))
-
- def test_get_schema_doc_handles_string_examples(self):
-        """get_schema_doc indents examples provided as a list of strings."""
- full_schema = copy(self.required_schema)
- full_schema.update(
- {'examples': ['ex1:\n [don\'t, expand, "this"]', 'ex2: true'],
- 'properties': {
- 'prop1': {'type': 'array', 'description': 'prop-description',
- 'items': {'type': 'integer'}}}})
- self.assertIn(
- dedent("""
- **Config schema**:
- **prop1:** (array of integer) prop-description
-
- **Examples**::
-
- ex1:
- [don't, expand, "this"]
- # --- Example2 ---
- ex2: true
- """),
- get_schema_doc(full_schema))
-
- def test_get_schema_doc_properly_parse_description(self):
-        """get_schema_doc formats the description properly."""
- full_schema = copy(self.required_schema)
- full_schema.update(
- {'properties': {
- 'p1': {
- 'type': 'string',
- 'description': dedent("""\
- This item
- has the
- following options:
-
- - option1
- - option2
- - option3
-
- The default value is
- option1""")
- }
- }}
- )
-
- self.assertIn(
- dedent("""
- **Config schema**:
- **p1:** (string) This item has the following options:
-
- - option1
- - option2
- - option3
-
- The default value is option1
- """),
- get_schema_doc(full_schema))
-
- def test_get_schema_doc_raises_key_errors(self):
- """get_schema_doc raises KeyErrors on missing keys."""
- for key in self.required_schema:
- invalid_schema = copy(self.required_schema)
- invalid_schema.pop(key)
- with self.assertRaises(KeyError) as context_mgr:
- get_schema_doc(invalid_schema)
- self.assertIn(key, str(context_mgr.exception))
-
-
-class AnnotatedCloudconfigFileTest(CiTestCase):
- maxDiff = None
-
- def test_annotated_cloudconfig_file_no_schema_errors(self):
- """With no schema_errors, print the original content."""
- content = b'ntp:\n pools: [ntp1.pools.com]\n'
- self.assertEqual(
- content,
- annotated_cloudconfig_file({}, content, schema_errors=[]))
-
- def test_annotated_cloudconfig_file_schema_annotates_and_adds_footer(self):
- """With schema_errors, error lines are annotated and a footer added."""
- content = dedent("""\
- #cloud-config
- # comment
- ntp:
- pools: [-99, 75]
- """).encode()
- expected = dedent("""\
- #cloud-config
- # comment
- ntp: # E1
- pools: [-99, 75] # E2,E3
-
- # Errors: -------------
- # E1: Some type error
- # E2: -99 is not a string
- # E3: 75 is not a string
-
- """)
- parsed_config = safe_load(content[13:])
- schema_errors = [
- ('ntp', 'Some type error'), ('ntp.pools.0', '-99 is not a string'),
- ('ntp.pools.1', '75 is not a string')]
- self.assertEqual(
- expected,
- annotated_cloudconfig_file(parsed_config, content, schema_errors))
-
- def test_annotated_cloudconfig_file_annotates_separate_line_items(self):
- """Errors are annotated for lists with items on separate lines."""
- content = dedent("""\
- #cloud-config
- # comment
- ntp:
- pools:
- - -99
- - 75
- """).encode()
- expected = dedent("""\
- ntp:
- pools:
- - -99 # E1
- - 75 # E2
- """)
- parsed_config = safe_load(content[13:])
- schema_errors = [
- ('ntp.pools.0', '-99 is not a string'),
- ('ntp.pools.1', '75 is not a string')]
- self.assertIn(
- expected,
- annotated_cloudconfig_file(parsed_config, content, schema_errors))
-
-
-class TestMain:
-
- exclusive_combinations = itertools.combinations(
- ["--system", "--docs all", "--config-file something"], 2
- )
-
- @pytest.mark.parametrize("params", exclusive_combinations)
- def test_main_exclusive_args(self, params, capsys):
-        """Main exits non-zero with an error on mutually exclusive args."""
- params = list(itertools.chain(*[a.split() for a in params]))
- with mock.patch('sys.argv', ['mycmd'] + params):
- with pytest.raises(SystemExit) as context_manager:
- main()
- assert 1 == context_manager.value.code
-
- _out, err = capsys.readouterr()
- expected = (
- 'Expected one of --config-file, --system or --docs arguments\n'
- )
- assert expected == err
-
- def test_main_missing_args(self, capsys):
- """Main exits non-zero and reports an error on missing parameters."""
- with mock.patch('sys.argv', ['mycmd']):
- with pytest.raises(SystemExit) as context_manager:
- main()
- assert 1 == context_manager.value.code
-
- _out, err = capsys.readouterr()
- expected = (
- 'Expected one of --config-file, --system or --docs arguments\n'
- )
- assert expected == err
-
- def test_main_absent_config_file(self, capsys):
- """Main exits non-zero when config file is absent."""
- myargs = ['mycmd', '--annotate', '--config-file', 'NOT_A_FILE']
- with mock.patch('sys.argv', myargs):
- with pytest.raises(SystemExit) as context_manager:
- main()
- assert 1 == context_manager.value.code
- _out, err = capsys.readouterr()
- assert 'Configfile NOT_A_FILE does not exist\n' == err
-
- def test_main_prints_docs(self, capsys):
- """When --docs parameter is provided, main generates documentation."""
- myargs = ['mycmd', '--docs', 'all']
- with mock.patch('sys.argv', myargs):
- assert 0 == main(), 'Expected 0 exit code'
- out, _err = capsys.readouterr()
- assert '\nNTP\n---\n' in out
- assert '\nRuncmd\n------\n' in out
-
- def test_main_validates_config_file(self, tmpdir, capsys):
- """When --config-file parameter is provided, main validates schema."""
- myyaml = tmpdir.join('my.yaml')
- myargs = ['mycmd', '--config-file', myyaml.strpath]
- myyaml.write(b'#cloud-config\nntp:') # shortest ntp schema
- with mock.patch('sys.argv', myargs):
- assert 0 == main(), 'Expected 0 exit code'
- out, _err = capsys.readouterr()
- assert 'Valid cloud-config: {0}\n'.format(myyaml) == out
-
- @mock.patch('cloudinit.config.schema.read_cfg_paths')
- @mock.patch('cloudinit.config.schema.os.getuid', return_value=0)
- def test_main_validates_system_userdata(
- self, m_getuid, m_read_cfg_paths, capsys, paths
- ):
- """When --system is provided, main validates system userdata."""
- m_read_cfg_paths.return_value = paths
- ud_file = paths.get_ipath_cur("userdata_raw")
- write_file(ud_file, b'#cloud-config\nntp:')
- myargs = ['mycmd', '--system']
- with mock.patch('sys.argv', myargs):
- assert 0 == main(), 'Expected 0 exit code'
- out, _err = capsys.readouterr()
- assert 'Valid cloud-config: system userdata\n' == out
-
- @mock.patch('cloudinit.config.schema.os.getuid', return_value=1000)
- def test_main_system_userdata_requires_root(self, m_getuid, capsys, paths):
- """Non-root user can't use --system param"""
- myargs = ['mycmd', '--system']
- with mock.patch('sys.argv', myargs):
- with pytest.raises(SystemExit) as context_manager:
- main()
- assert 1 == context_manager.value.code
- _out, err = capsys.readouterr()
- expected = (
- 'Unable to read system userdata as non-root user. Try using sudo\n'
- )
- assert expected == err
-
-
-class CloudTestsIntegrationTest(CiTestCase):
- """Validate all cloud-config yaml schema provided in integration tests.
-
- It is less expensive to have unittests validate schema of all cloud-config
- yaml provided to integration tests, than to run an integration test which
- raises Warnings or errors on invalid cloud-config schema.
- """
-
- @skipUnlessJsonSchema()
- def test_all_integration_test_cloud_config_schema(self):
- """Validate schema of cloud_tests yaml files looking for warnings."""
- schema = get_schema()
- testsdir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
- integration_testdir = os.path.sep.join(
- [testsdir, 'cloud_tests', 'testcases'])
- errors = []
-
- yaml_files = []
- for root, _dirnames, filenames in os.walk(integration_testdir):
- yaml_files.extend([os.path.join(root, f)
- for f in filenames if f.endswith(".yaml")])
- self.assertTrue(len(yaml_files) > 0)
-
- for filename in yaml_files:
- test_cfg = safe_load(open(filename))
- cloud_config = test_cfg.get('cloud_config')
- if cloud_config:
- cloud_config = safe_load(
- cloud_config.replace("#cloud-config\n", ""))
- try:
- validate_cloudconfig_schema(
- cloud_config, schema, strict=True)
- except SchemaValidationError as e:
- errors.append(
- '{0}: {1}'.format(
- filename, e))
- if errors:
- raise AssertionError(', '.join(errors))
-
-
-def _get_schema_doc_examples():
- examples_dir = Path(
- cloudinit.__file__).parent.parent / 'doc' / 'examples'
- assert examples_dir.is_dir()
-
- all_text_files = (f for f in examples_dir.glob('cloud-config*.txt')
- if not f.name.startswith('cloud-config-archive'))
- return all_text_files
-
-
-class TestSchemaDocExamples:
- schema = get_schema()
-
- @pytest.mark.parametrize("example_path", _get_schema_doc_examples())
- @skipUnlessJsonSchema()
- def test_schema_doc_examples(self, example_path):
- validate_cloudconfig_file(str(example_path), self.schema)
-
-# vi: ts=4 expandtab syntax=python
diff --git a/tests/unittests/test_helpers.py b/tests/unittests/test_helpers.py
index 2e4582a0..69291597 100644
--- a/tests/unittests/test_helpers.py
+++ b/tests/unittests/test_helpers.py
@@ -3,10 +3,10 @@
"""Tests of the built-in user data handlers."""
import os
-
-from cloudinit.tests import helpers as test_helpers
+from pathlib import Path
from cloudinit import sources
+from tests.unittests import helpers as test_helpers
class MyDataSource(sources.DataSource):
@@ -24,8 +24,9 @@ class TestPaths(test_helpers.ResourceUsingTestCase):
mypaths = self.getCloudPaths(myds)
self.assertEqual(
- os.path.join(mypaths.cloud_dir, 'instances', safe_iid),
- mypaths.get_ipath())
+ os.path.join(mypaths.cloud_dir, "instances", safe_iid),
+ mypaths.get_ipath(),
+ )
def test_get_ipath_and_empty_instance_id_returns_none(self):
myds = MyDataSource(sys_cfg={}, distro=None, paths={})
@@ -34,4 +35,35 @@ class TestPaths(test_helpers.ResourceUsingTestCase):
self.assertIsNone(mypaths.get_ipath())
+
+class Testcloud_init_project_dir:
+ top_dir = test_helpers.get_top_level_dir()
+
+ @staticmethod
+ def _get_top_level_dir_alt_implementation():
+ """Alternative implementation for comparing against.
+
+ Note: Recursively searching for .git/ fails during build tests due to
+ .git not existing. This implementation assumes that ../../../ is the
+ relative path to the cloud-init project directory from this file.
+ """
+ out = Path(__file__).parent.parent.parent.resolve()
+ return out
+
+ def test_top_level_dir(self):
+ """Assert the location of the top project directory is correct"""
+ assert self.top_dir == self._get_top_level_dir_alt_implementation()
+
+ def test_cloud_init_project_dir(self):
+ """Assert cloud_init_project_dir produces an expected location
+
+ Compare the returned value to an alternate (naive) implementation
+ """
+ assert (
+ str(Path(self.top_dir, "test"))
+ == test_helpers.cloud_init_project_dir("test")
+ == str(Path(self._get_top_level_dir_alt_implementation(), "test"))
+ )
+
+
# vi: ts=4 expandtab
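The Testcloud_init_project_dir tests added above pin down two properties: get_top_level_dir() must agree with a naive three-parent walk up from this test module, and cloud_init_project_dir("test") must equal str(Path(<top dir>, "test")). A self-contained sketch of that relationship, for experimentation only (the three-parent walk mirrors the alternative implementation in the diff; the helper logic below is an assumption, not the real tests/unittests/helpers.py code):

    from pathlib import Path


    def naive_top_level_dir(test_module_file: str) -> Path:
        # tests/unittests/<module>.py -> tests/unittests -> tests -> project root
        return Path(test_module_file).resolve().parent.parent.parent


    def naive_project_dir(test_module_file: str, sub_path: str) -> str:
        # What the assertion above expects cloud_init_project_dir(sub_path) to return.
        return str(Path(naive_top_level_dir(test_module_file), sub_path))


    if __name__ == "__main__":
        # e.g. /src/cloud-init/tests/unittests/test_helpers.py -> /src/cloud-init/test
        print(naive_project_dir(__file__, "test"))

As the docstring in the diff notes, recursively searching for .git/ is not possible during package build tests, which is why the naive ../../../ walk serves as the cross-check.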
diff --git a/tests/unittests/test_log.py b/tests/unittests/test_log.py
index e069a487..87c69dbb 100644
--- a/tests/unittests/test_log.py
+++ b/tests/unittests/test_log.py
@@ -9,11 +9,10 @@ import time
from cloudinit import log as ci_logging
from cloudinit.analyze.dump import CLOUD_INIT_ASCTIME_FMT
-from cloudinit.tests.helpers import CiTestCase
+from tests.unittests.helpers import CiTestCase
class TestCloudInitLogger(CiTestCase):
-
def setUp(self):
# set up a logger like cloud-init does in setupLogging, but instead
# of sys.stderr, we'll plug in a StringIO() object so we can see
@@ -26,7 +25,7 @@ class TestCloudInitLogger(CiTestCase):
console.setLevel(ci_logging.DEBUG)
self.ci_root.addHandler(console)
self.ci_root.setLevel(ci_logging.DEBUG)
- self.LOG = logging.getLogger('test_cloudinit_logger')
+ self.LOG = logging.getLogger("test_cloudinit_logger")
def test_logger_uses_gmtime(self):
"""Test that log message have timestamp in UTC (gmtime)"""
@@ -43,15 +42,16 @@ class TestCloudInitLogger(CiTestCase):
# utc_after : 2017-08-23 14:19:43.570064
utc_before = datetime.datetime.utcnow() - datetime.timedelta(0, 0.5)
- self.LOG.error('Test message')
+ self.LOG.error("Test message")
utc_after = datetime.datetime.utcnow() + datetime.timedelta(0, 0.5)
# extract timestamp from log:
# 2017-08-23 14:19:43,069 - test_log.py[ERROR]: Test message
logstr = self.ci_logs.getvalue().splitlines()[0]
- timestampstr = logstr.split(' - ')[0]
- parsed_dt = datetime.datetime.strptime(timestampstr,
- CLOUD_INIT_ASCTIME_FMT)
+ timestampstr = logstr.split(" - ")[0]
+ parsed_dt = datetime.datetime.strptime(
+ timestampstr, CLOUD_INIT_ASCTIME_FMT
+ )
self.assertLess(utc_before, parsed_dt)
self.assertLess(parsed_dt, utc_after)
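test_logger_uses_gmtime above depends on the first log line's leading asctime field parsing back into a UTC datetime. A stdlib-only sketch of the same check, useful for experimenting outside the test suite (the format string is an assumption matching the sample line in the test's comments, not an import of cloudinit's CLOUD_INIT_ASCTIME_FMT):

    import datetime
    import io
    import logging
    import time

    # Assumed format mirroring "2017-08-23 14:19:43,069" from the test comments.
    ASCTIME_FMT = "%Y-%m-%d %H:%M:%S,%f"

    stream = io.StringIO()
    handler = logging.StreamHandler(stream)
    formatter = logging.Formatter(
        "%(asctime)s - %(filename)s[%(levelname)s]: %(message)s"
    )
    formatter.converter = time.gmtime  # emit record timestamps in UTC
    handler.setFormatter(formatter)

    log = logging.getLogger("gmtime_demo")
    log.addHandler(handler)
    log.setLevel(logging.DEBUG)

    log.error("Test message")
    timestampstr = stream.getvalue().splitlines()[0].split(" - ")[0]
    parsed_dt = datetime.datetime.strptime(timestampstr, ASCTIME_FMT)
    assert abs(parsed_dt - datetime.datetime.utcnow()) < datetime.timedelta(seconds=1)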
diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py
index 10871bcf..cf484dda 100644
--- a/tests/unittests/test_merging.py
+++ b/tests/unittests/test_merging.py
@@ -1,13 +1,5 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.tests import helpers
-
-from cloudinit.handlers import cloud_config
-from cloudinit.handlers import (CONTENT_START, CONTENT_END)
-
-from cloudinit import helpers as c_helpers
-from cloudinit import util
-
import collections
import glob
import os
@@ -15,6 +7,11 @@ import random
import re
import string
+from cloudinit import helpers as c_helpers
+from cloudinit import util
+from cloudinit.handlers import CONTENT_END, CONTENT_START, cloud_config
+from tests.unittests import helpers
+
SOURCE_PAT = "source*.*yaml"
EXPECTED_PAT = "expected%s.yaml"
TYPES = [dict, str, list, tuple, None, int]
@@ -43,7 +40,7 @@ def _old_mergemanydict(*args):
def _random_str(rand):
- base = ''
+ base = ""
for _i in range(rand.randint(1, 2 ** 8)):
base += rand.choice(string.ascii_letters + string.digits)
return base
@@ -98,7 +95,7 @@ def make_dict(max_depth, seed=None):
class TestSimpleRun(helpers.ResourceUsingTestCase):
def _load_merge_files(self):
- merge_root = helpers.resourceLocation('merge_sources')
+ merge_root = helpers.resourceLocation("merge_sources")
tests = []
source_ids = collections.defaultdict(list)
expected_files = {}
@@ -106,8 +103,9 @@ class TestSimpleRun(helpers.ResourceUsingTestCase):
base_fn = os.path.basename(fn)
file_id = re.match(r"source(\d+)\-(\d+)[.]yaml", base_fn)
if not file_id:
- raise IOError("File %s does not have a numeric identifier"
- % (fn))
+ raise IOError(
+ "File %s does not have a numeric identifier" % (fn)
+ )
file_id = int(file_id.group(1))
source_ids[file_id].append(fn)
expected_fn = os.path.join(merge_root, EXPECTED_PAT % (file_id))
@@ -141,29 +139,31 @@ class TestSimpleRun(helpers.ResourceUsingTestCase):
cc_handler = cloud_config.CloudConfigPartHandler(paths)
cc_handler.cloud_fn = None
for (payloads, (expected_merge, expected_fn)) in tests:
- cc_handler.handle_part(None, CONTENT_START, None,
- None, None, None)
+ cc_handler.handle_part(None, CONTENT_START, None, None, None, None)
merging_fns = []
for (fn, contents) in payloads:
- cc_handler.handle_part(None, None, "%s.yaml" % (fn),
- contents, None, {})
+ cc_handler.handle_part(
+ None, None, "%s.yaml" % (fn), contents, None, {}
+ )
merging_fns.append(fn)
merged_buf = cc_handler.cloud_buf
- cc_handler.handle_part(None, CONTENT_END, None,
- None, None, None)
+ cc_handler.handle_part(None, CONTENT_END, None, None, None, None)
fail_msg = "Equality failure on checking %s with %s: %s != %s"
- fail_msg = fail_msg % (expected_fn,
- ",".join(merging_fns), merged_buf,
- expected_merge)
+ fail_msg = fail_msg % (
+ expected_fn,
+ ",".join(merging_fns),
+ merged_buf,
+ expected_merge,
+ )
self.assertEqual(expected_merge, merged_buf, msg=fail_msg)
def test_compat_merges_dict(self):
a = {
- '1': '2',
- 'b': 'c',
+ "1": "2",
+ "b": "c",
}
b = {
- 'b': 'e',
+ "b": "e",
}
c = _old_mergedict(a, b)
d = util.mergemanydict([a, b])
@@ -171,53 +171,53 @@ class TestSimpleRun(helpers.ResourceUsingTestCase):
def test_compat_merges_dict2(self):
a = {
- 'Blah': 1,
- 'Blah2': 2,
- 'Blah3': 3,
+ "Blah": 1,
+ "Blah2": 2,
+ "Blah3": 3,
}
b = {
- 'Blah': 1,
- 'Blah2': 2,
- 'Blah3': [1],
+ "Blah": 1,
+ "Blah2": 2,
+ "Blah3": [1],
}
c = _old_mergedict(a, b)
d = util.mergemanydict([a, b])
self.assertEqual(c, d)
def test_compat_merges_list(self):
- a = {'b': [1, 2, 3]}
- b = {'b': [4, 5]}
- c = {'b': [6, 7]}
+ a = {"b": [1, 2, 3]}
+ b = {"b": [4, 5]}
+ c = {"b": [6, 7]}
e = _old_mergemanydict(a, b, c)
f = util.mergemanydict([a, b, c])
self.assertEqual(e, f)
def test_compat_merges_str(self):
- a = {'b': "hi"}
- b = {'b': "howdy"}
- c = {'b': "hallo"}
+ a = {"b": "hi"}
+ b = {"b": "howdy"}
+ c = {"b": "hallo"}
e = _old_mergemanydict(a, b, c)
f = util.mergemanydict([a, b, c])
self.assertEqual(e, f)
def test_compat_merge_sub_dict(self):
a = {
- '1': '2',
- 'b': {
- 'f': 'g',
- 'e': 'c',
- 'h': 'd',
- 'hh': {
- '1': 2,
+ "1": "2",
+ "b": {
+ "f": "g",
+ "e": "c",
+ "h": "d",
+ "hh": {
+ "1": 2,
},
- }
+ },
}
b = {
- 'b': {
- 'e': 'c',
- 'hh': {
- '3': 4,
- }
+ "b": {
+ "e": "c",
+ "hh": {
+ "3": 4,
+ },
}
}
c = _old_mergedict(a, b)
@@ -226,14 +226,14 @@ class TestSimpleRun(helpers.ResourceUsingTestCase):
def test_compat_merge_sub_dict2(self):
a = {
- '1': '2',
- 'b': {
- 'f': 'g',
- }
+ "1": "2",
+ "b": {
+ "f": "g",
+ },
}
b = {
- 'b': {
- 'e': 'c',
+ "b": {
+ "e": "c",
}
}
c = _old_mergedict(a, b)
@@ -242,18 +242,19 @@ class TestSimpleRun(helpers.ResourceUsingTestCase):
def test_compat_merge_sub_list(self):
a = {
- '1': '2',
- 'b': {
- 'f': ['1'],
- }
+ "1": "2",
+ "b": {
+ "f": ["1"],
+ },
}
b = {
- 'b': {
- 'f': [],
+ "b": {
+ "f": [],
}
}
c = _old_mergedict(a, b)
d = util.mergemanydict([a, b])
self.assertEqual(c, d)
+
# vi: ts=4 expandtab
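The compat tests above compare util.mergemanydict against the legacy _old_mergedict/_old_mergemanydict helpers rather than hard-coding merged results. A rough sketch of a legacy-style "first value wins" recursive merge, to make the shape of that comparison concrete (the semantics here are an assumption for illustration, not cloud-init's actual merger code):

    def naive_mergedict(src, cand):
        # Merge cand into src: existing src keys win, nested dicts recurse.
        if isinstance(src, dict) and isinstance(cand, dict):
            merged = dict(src)
            for key, value in cand.items():
                merged[key] = (
                    naive_mergedict(merged[key], value) if key in merged else value
                )
            return merged
        return src  # non-dict conflict: keep the earlier value


    def naive_mergemanydict(dicts):
        out = {}
        for d in dicts:
            out = naive_mergedict(out, d)
        return out


    if __name__ == "__main__":
        a = {"1": "2", "b": "c"}
        b = {"b": "e"}
        # Under these assumed semantics the earlier value of "b" survives.
        print(naive_mergemanydict([a, b]))  # {'1': '2', 'b': 'c'}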
diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
index 70453683..47e4ba00 100644
--- a/tests/unittests/test_net.py
+++ b/tests/unittests/test_net.py
@@ -1,20 +1,5 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import net
-from cloudinit import distros
-from cloudinit.net import cmdline
-from cloudinit.net import (
- eni, interface_has_own_mac, natural_sort_key, netplan, network_state,
- renderers, sysconfig)
-from cloudinit.sources.helpers import openstack
-from cloudinit import temp_utils
-from cloudinit import subp
-from cloudinit import util
-from cloudinit import safeyaml as yaml
-
-from cloudinit.tests.helpers import (
- CiTestCase, FilesystemMockingTestCase, dir2dict, mock, populate_dir)
-
import base64
import copy
import gzip
@@ -23,9 +8,32 @@ import json
import os
import re
import textwrap
-from yaml.serializer import Serializer
import pytest
+from yaml.serializer import Serializer
+
+from cloudinit import distros, net
+from cloudinit import safeyaml as yaml
+from cloudinit import subp, temp_utils, util
+from cloudinit.net import (
+ cmdline,
+ eni,
+ interface_has_own_mac,
+ natural_sort_key,
+ netplan,
+ network_state,
+ networkd,
+ renderers,
+ sysconfig,
+)
+from cloudinit.sources.helpers import openstack
+from tests.unittests.helpers import (
+ CiTestCase,
+ FilesystemMockingTestCase,
+ dir2dict,
+ mock,
+ populate_dir,
+)
DHCP_CONTENT_1 = """
DEVICE='eth0'
@@ -48,15 +56,19 @@ DOMAINSEARCH='foo.com'
"""
DHCP_EXPECTED_1 = {
- 'name': 'eth0',
- 'type': 'physical',
- 'subnets': [{'broadcast': '192.168.122.255',
- 'control': 'manual',
- 'gateway': '192.168.122.1',
- 'dns_search': ['foo.com'],
- 'type': 'dhcp',
- 'netmask': '255.255.255.0',
- 'dns_nameservers': ['192.168.122.1']}],
+ "name": "eth0",
+ "type": "physical",
+ "subnets": [
+ {
+ "broadcast": "192.168.122.255",
+ "control": "manual",
+ "gateway": "192.168.122.1",
+ "dns_search": ["foo.com"],
+ "type": "dhcp",
+ "netmask": "255.255.255.0",
+ "dns_nameservers": ["192.168.122.1"],
+ }
+ ],
}
DHCP6_CONTENT_1 = """
@@ -73,12 +85,17 @@ DNSDOMAIN=
"""
DHCP6_EXPECTED_1 = {
- 'name': 'eno1',
- 'type': 'physical',
- 'subnets': [{'control': 'manual',
- 'dns_nameservers': ['2001:67c:1562:8010::2:1'],
- 'netmask': '64',
- 'type': 'dhcp6'}]}
+ "name": "eno1",
+ "type": "physical",
+ "subnets": [
+ {
+ "control": "manual",
+ "dns_nameservers": ["2001:67c:1562:8010::2:1"],
+ "netmask": "64",
+ "type": "dhcp6",
+ }
+ ],
+}
STATIC_CONTENT_1 = """
@@ -97,14 +114,20 @@ DOMAINSEARCH='foo.com'
"""
STATIC_EXPECTED_1 = {
- 'name': 'eth1',
- 'type': 'physical',
- 'subnets': [{'broadcast': '10.0.0.255', 'control': 'manual',
- 'gateway': '10.0.0.1',
- 'dns_search': ['foo.com'], 'type': 'static',
- 'netmask': '255.255.255.0',
- 'dns_nameservers': ['10.0.1.1'],
- 'address': '10.0.0.2'}],
+ "name": "eth1",
+ "type": "physical",
+ "subnets": [
+ {
+ "broadcast": "10.0.0.255",
+ "control": "manual",
+ "gateway": "10.0.0.1",
+ "dns_search": ["foo.com"],
+ "type": "static",
+ "netmask": "255.255.255.0",
+ "dns_nameservers": ["10.0.1.1"],
+ "address": "10.0.0.2",
+ }
+ ],
}
V1_NAMESERVER_ALIAS = """
@@ -471,34 +494,42 @@ ethernets:
# Examples (and expected outputs for various renderers).
OS_SAMPLES = [
{
- 'in_data': {
+ "in_data": {
"services": [{"type": "dns", "address": "172.19.0.12"}],
- "networks": [{
- "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4",
- "type": "ipv4", "netmask": "255.255.252.0",
- "link": "tap1a81968a-79",
- "routes": [{
- "netmask": "0.0.0.0",
- "network": "0.0.0.0",
- "gateway": "172.19.3.254",
- }],
- "ip_address": "172.19.1.34", "id": "network0"
- }],
+ "networks": [
+ {
+ "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4",
+ "type": "ipv4",
+ "netmask": "255.255.252.0",
+ "link": "tap1a81968a-79",
+ "routes": [
+ {
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": "172.19.3.254",
+ }
+ ],
+ "ip_address": "172.19.1.34",
+ "id": "network0",
+ }
+ ],
"links": [
{
"ethernet_mac_address": "fa:16:3e:ed:9a:59",
- "mtu": None, "type": "bridge", "id":
- "tap1a81968a-79",
- "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f"
+ "mtu": None,
+ "type": "bridge",
+ "id": "tap1a81968a-79",
+ "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f",
},
],
},
- 'in_macs': {
- 'fa:16:3e:ed:9a:59': 'eth0',
+ "in_macs": {
+ "fa:16:3e:ed:9a:59": "eth0",
},
- 'out_sysconfig_opensuse': [
- ('etc/sysconfig/network/ifcfg-eth0',
- """
+ "out_sysconfig_opensuse": [
+ (
+ "etc/sysconfig/network/ifcfg-eth0",
+ """
# Created by cloud-init on instance boot automatically, do not edit.
#
BOOTPROTO=static
@@ -506,26 +537,39 @@ IPADDR=172.19.1.34
LLADDR=fa:16:3e:ed:9a:59
NETMASK=255.255.252.0
STARTMODE=auto
-""".lstrip()),
- ('etc/resolv.conf',
- """
+""".lstrip(),
+ ),
+ (
+ "etc/resolv.conf",
+ """
; Created by cloud-init on instance boot automatically, do not edit.
;
nameserver 172.19.0.12
-""".lstrip()),
- ('etc/NetworkManager/conf.d/99-cloud-init.conf',
- """
+""".lstrip(),
+ ),
+ (
+ "etc/NetworkManager/conf.d/99-cloud-init.conf",
+ """
# Created by cloud-init on instance boot automatically, do not edit.
#
[main]
dns = none
-""".lstrip()),
- ('etc/udev/rules.d/85-persistent-net-cloud-init.rules',
- "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
- 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))],
- 'out_sysconfig_rhel': [
- ('etc/sysconfig/network-scripts/ifcfg-eth0',
- """
+""".lstrip(),
+ ),
+ (
+ "etc/udev/rules.d/85-persistent-net-cloud-init.rules",
+ "".join(
+ [
+ 'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
+ 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n',
+ ]
+ ),
+ ),
+ ],
+ "out_sysconfig_rhel": [
+ (
+ "etc/sysconfig/network-scripts/ifcfg-eth0",
+ """
# Created by cloud-init on instance boot automatically, do not edit.
#
BOOTPROTO=none
@@ -539,60 +583,82 @@ NM_CONTROLLED=no
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
-""".lstrip()),
- ('etc/resolv.conf',
- """
+""".lstrip(),
+ ),
+ (
+ "etc/resolv.conf",
+ """
; Created by cloud-init on instance boot automatically, do not edit.
;
nameserver 172.19.0.12
-""".lstrip()),
- ('etc/NetworkManager/conf.d/99-cloud-init.conf',
- """
+""".lstrip(),
+ ),
+ (
+ "etc/NetworkManager/conf.d/99-cloud-init.conf",
+ """
# Created by cloud-init on instance boot automatically, do not edit.
#
[main]
dns = none
-""".lstrip()),
- ('etc/udev/rules.d/70-persistent-net.rules',
- "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
- 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))]
-
+""".lstrip(),
+ ),
+ (
+ "etc/udev/rules.d/70-persistent-net.rules",
+ "".join(
+ [
+ 'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
+ 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n',
+ ]
+ ),
+ ),
+ ],
},
{
- 'in_data': {
+ "in_data": {
"services": [{"type": "dns", "address": "172.19.0.12"}],
- "networks": [{
- "network_id": "public-ipv4",
- "type": "ipv4", "netmask": "255.255.252.0",
- "link": "tap1a81968a-79",
- "routes": [{
- "netmask": "0.0.0.0",
- "network": "0.0.0.0",
- "gateway": "172.19.3.254",
- }],
- "ip_address": "172.19.1.34", "id": "network0"
- }, {
- "network_id": "private-ipv4",
- "type": "ipv4", "netmask": "255.255.255.0",
- "link": "tap1a81968a-79",
- "routes": [],
- "ip_address": "10.0.0.10", "id": "network1"
- }],
+ "networks": [
+ {
+ "network_id": "public-ipv4",
+ "type": "ipv4",
+ "netmask": "255.255.252.0",
+ "link": "tap1a81968a-79",
+ "routes": [
+ {
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": "172.19.3.254",
+ }
+ ],
+ "ip_address": "172.19.1.34",
+ "id": "network0",
+ },
+ {
+ "network_id": "private-ipv4",
+ "type": "ipv4",
+ "netmask": "255.255.255.0",
+ "link": "tap1a81968a-79",
+ "routes": [],
+ "ip_address": "10.0.0.10",
+ "id": "network1",
+ },
+ ],
"links": [
{
"ethernet_mac_address": "fa:16:3e:ed:9a:59",
- "mtu": None, "type": "bridge", "id":
- "tap1a81968a-79",
- "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f"
+ "mtu": None,
+ "type": "bridge",
+ "id": "tap1a81968a-79",
+ "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f",
},
],
},
- 'in_macs': {
- 'fa:16:3e:ed:9a:59': 'eth0',
+ "in_macs": {
+ "fa:16:3e:ed:9a:59": "eth0",
},
- 'out_sysconfig_opensuse': [
- ('etc/sysconfig/network/ifcfg-eth0',
- """
+ "out_sysconfig_opensuse": [
+ (
+ "etc/sysconfig/network/ifcfg-eth0",
+ """
# Created by cloud-init on instance boot automatically, do not edit.
#
BOOTPROTO=static
@@ -602,26 +668,39 @@ LLADDR=fa:16:3e:ed:9a:59
NETMASK=255.255.252.0
NETMASK1=255.255.255.0
STARTMODE=auto
-""".lstrip()),
- ('etc/resolv.conf',
- """
+""".lstrip(),
+ ),
+ (
+ "etc/resolv.conf",
+ """
; Created by cloud-init on instance boot automatically, do not edit.
;
nameserver 172.19.0.12
-""".lstrip()),
- ('etc/NetworkManager/conf.d/99-cloud-init.conf',
- """
+""".lstrip(),
+ ),
+ (
+ "etc/NetworkManager/conf.d/99-cloud-init.conf",
+ """
# Created by cloud-init on instance boot automatically, do not edit.
#
[main]
dns = none
-""".lstrip()),
- ('etc/udev/rules.d/85-persistent-net-cloud-init.rules',
- "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
- 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))],
- 'out_sysconfig_rhel': [
- ('etc/sysconfig/network-scripts/ifcfg-eth0',
- """
+""".lstrip(),
+ ),
+ (
+ "etc/udev/rules.d/85-persistent-net-cloud-init.rules",
+ "".join(
+ [
+ 'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
+ 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n',
+ ]
+ ),
+ ),
+ ],
+ "out_sysconfig_rhel": [
+ (
+ "etc/sysconfig/network-scripts/ifcfg-eth0",
+ """
# Created by cloud-init on instance boot automatically, do not edit.
#
BOOTPROTO=none
@@ -637,80 +716,106 @@ NM_CONTROLLED=no
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
-""".lstrip()),
- ('etc/resolv.conf',
- """
+""".lstrip(),
+ ),
+ (
+ "etc/resolv.conf",
+ """
; Created by cloud-init on instance boot automatically, do not edit.
;
nameserver 172.19.0.12
-""".lstrip()),
- ('etc/NetworkManager/conf.d/99-cloud-init.conf',
- """
+""".lstrip(),
+ ),
+ (
+ "etc/NetworkManager/conf.d/99-cloud-init.conf",
+ """
# Created by cloud-init on instance boot automatically, do not edit.
#
[main]
dns = none
-""".lstrip()),
- ('etc/udev/rules.d/70-persistent-net.rules',
- "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
- 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))]
-
+""".lstrip(),
+ ),
+ (
+ "etc/udev/rules.d/70-persistent-net.rules",
+ "".join(
+ [
+ 'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
+ 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n',
+ ]
+ ),
+ ),
+ ],
},
{
- 'in_data': {
+ "in_data": {
"services": [{"type": "dns", "address": "172.19.0.12"}],
- "networks": [{
- "network_id": "public-ipv4",
- "type": "ipv4", "netmask": "255.255.252.0",
- "link": "tap1a81968a-79",
- "routes": [{
- "netmask": "0.0.0.0",
- "network": "0.0.0.0",
- "gateway": "172.19.3.254",
- }],
- "ip_address": "172.19.1.34", "id": "network0"
- }, {
- "network_id": "public-ipv6-a",
- "type": "ipv6", "netmask": "",
- "link": "tap1a81968a-79",
- "routes": [
- {
- "gateway": "2001:DB8::1",
- "netmask": "::",
- "network": "::"
- }
- ],
- "ip_address": "2001:DB8::10", "id": "network1"
- }, {
- "network_id": "public-ipv6-b",
- "type": "ipv6", "netmask": "64",
- "link": "tap1a81968a-79",
- "routes": [
- ],
- "ip_address": "2001:DB9::10", "id": "network2"
- }, {
- "network_id": "public-ipv6-c",
- "type": "ipv6", "netmask": "64",
- "link": "tap1a81968a-79",
- "routes": [
- ],
- "ip_address": "2001:DB10::10", "id": "network3"
- }],
+ "networks": [
+ {
+ "network_id": "public-ipv4",
+ "type": "ipv4",
+ "netmask": "255.255.252.0",
+ "link": "tap1a81968a-79",
+ "routes": [
+ {
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": "172.19.3.254",
+ }
+ ],
+ "ip_address": "172.19.1.34",
+ "id": "network0",
+ },
+ {
+ "network_id": "public-ipv6-a",
+ "type": "ipv6",
+ "netmask": "",
+ "link": "tap1a81968a-79",
+ "routes": [
+ {
+ "gateway": "2001:DB8::1",
+ "netmask": "::",
+ "network": "::",
+ }
+ ],
+ "ip_address": "2001:DB8::10",
+ "id": "network1",
+ },
+ {
+ "network_id": "public-ipv6-b",
+ "type": "ipv6",
+ "netmask": "64",
+ "link": "tap1a81968a-79",
+ "routes": [],
+ "ip_address": "2001:DB9::10",
+ "id": "network2",
+ },
+ {
+ "network_id": "public-ipv6-c",
+ "type": "ipv6",
+ "netmask": "64",
+ "link": "tap1a81968a-79",
+ "routes": [],
+ "ip_address": "2001:DB10::10",
+ "id": "network3",
+ },
+ ],
"links": [
{
"ethernet_mac_address": "fa:16:3e:ed:9a:59",
- "mtu": None, "type": "bridge", "id":
- "tap1a81968a-79",
- "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f"
+ "mtu": None,
+ "type": "bridge",
+ "id": "tap1a81968a-79",
+ "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f",
},
],
},
- 'in_macs': {
- 'fa:16:3e:ed:9a:59': 'eth0',
+ "in_macs": {
+ "fa:16:3e:ed:9a:59": "eth0",
},
- 'out_sysconfig_opensuse': [
- ('etc/sysconfig/network/ifcfg-eth0',
- """
+ "out_sysconfig_opensuse": [
+ (
+ "etc/sysconfig/network/ifcfg-eth0",
+ """
# Created by cloud-init on instance boot automatically, do not edit.
#
BOOTPROTO=static
@@ -721,26 +826,39 @@ IPADDR6_2=2001:DB10::10/64
LLADDR=fa:16:3e:ed:9a:59
NETMASK=255.255.252.0
STARTMODE=auto
-""".lstrip()),
- ('etc/resolv.conf',
- """
+""".lstrip(),
+ ),
+ (
+ "etc/resolv.conf",
+ """
; Created by cloud-init on instance boot automatically, do not edit.
;
nameserver 172.19.0.12
-""".lstrip()),
- ('etc/NetworkManager/conf.d/99-cloud-init.conf',
- """
+""".lstrip(),
+ ),
+ (
+ "etc/NetworkManager/conf.d/99-cloud-init.conf",
+ """
# Created by cloud-init on instance boot automatically, do not edit.
#
[main]
dns = none
-""".lstrip()),
- ('etc/udev/rules.d/85-persistent-net-cloud-init.rules',
- "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
- 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))],
- 'out_sysconfig_rhel': [
- ('etc/sysconfig/network-scripts/ifcfg-eth0',
- """
+""".lstrip(),
+ ),
+ (
+ "etc/udev/rules.d/85-persistent-net-cloud-init.rules",
+ "".join(
+ [
+ 'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
+ 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n',
+ ]
+ ),
+ ),
+ ],
+ "out_sysconfig_rhel": [
+ (
+ "etc/sysconfig/network-scripts/ifcfg-eth0",
+ """
# Created by cloud-init on instance boot automatically, do not edit.
#
BOOTPROTO=none
@@ -760,24 +878,36 @@ NM_CONTROLLED=no
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
-""".lstrip()),
- ('etc/resolv.conf',
- """
+""".lstrip(),
+ ),
+ (
+ "etc/resolv.conf",
+ """
; Created by cloud-init on instance boot automatically, do not edit.
;
nameserver 172.19.0.12
-""".lstrip()),
- ('etc/NetworkManager/conf.d/99-cloud-init.conf',
- """
+""".lstrip(),
+ ),
+ (
+ "etc/NetworkManager/conf.d/99-cloud-init.conf",
+ """
# Created by cloud-init on instance boot automatically, do not edit.
#
[main]
dns = none
-""".lstrip()),
- ('etc/udev/rules.d/70-persistent-net.rules',
- "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
- 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))]
- }
+""".lstrip(),
+ ),
+ (
+ "etc/udev/rules.d/70-persistent-net.rules",
+ "".join(
+ [
+ 'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ',
+ 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n',
+ ]
+ ),
+ ),
+ ],
+ },
]
EXAMPLE_ENI = """
@@ -820,8 +950,39 @@ iface eth1 inet static
""".lstrip()
NETWORK_CONFIGS = {
- 'small': {
- 'expected_eni': textwrap.dedent("""\
+ "small": {
+ "expected_networkd_eth99": textwrap.dedent(
+ """\
+ [Match]
+ Name=eth99
+ MACAddress=c0:d6:9f:2c:e8:80
+ [Address]
+ Address=192.168.21.3/24
+ [Network]
+ DHCP=ipv4
+ Domains=barley.maas sach.maas
+ Domains=wark.maas
+ DNS=1.2.3.4 5.6.7.8
+ DNS=8.8.8.8 8.8.4.4
+ [Route]
+ Gateway=65.61.151.37
+ Destination=0.0.0.0/0
+ Metric=10000
+ """
+ ).rstrip(" "),
+ "expected_networkd_eth1": textwrap.dedent(
+ """\
+ [Match]
+ Name=eth1
+ MACAddress=cf:d6:af:48:e8:80
+ [Network]
+ DHCP=no
+ Domains=wark.maas
+ DNS=1.2.3.4 5.6.7.8
+ """
+ ).rstrip(" "),
+ "expected_eni": textwrap.dedent(
+ """\
auto lo
iface lo inet loopback
dns-nameservers 1.2.3.4 5.6.7.8
@@ -839,8 +1000,10 @@ NETWORK_CONFIGS = {
dns-search barley.maas sach.maas
post-up route add default gw 65.61.151.37 metric 10000 || true
pre-down route del default gw 65.61.151.37 metric 10000 || true
- """).rstrip(' '),
- 'expected_netplan': textwrap.dedent("""
+ """
+ ).rstrip(" "),
+ "expected_netplan": textwrap.dedent(
+ """
network:
version: 2
ethernets:
@@ -866,29 +1029,37 @@ NETWORK_CONFIGS = {
to: 0.0.0.0/0
via: 65.61.151.37
set-name: eth99
- """).rstrip(' '),
- 'expected_sysconfig_opensuse': {
- 'ifcfg-eth1': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "expected_sysconfig_opensuse": {
+ "ifcfg-eth1": textwrap.dedent(
+ """\
BOOTPROTO=static
LLADDR=cf:d6:af:48:e8:80
- STARTMODE=auto"""),
- 'ifcfg-eth99': textwrap.dedent("""\
+ STARTMODE=auto"""
+ ),
+ "ifcfg-eth99": textwrap.dedent(
+ """\
BOOTPROTO=dhcp4
LLADDR=c0:d6:9f:2c:e8:80
IPADDR=192.168.21.3
NETMASK=255.255.255.0
- STARTMODE=auto"""),
+ STARTMODE=auto"""
+ ),
},
- 'expected_sysconfig_rhel': {
- 'ifcfg-eth1': textwrap.dedent("""\
+ "expected_sysconfig_rhel": {
+ "ifcfg-eth1": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=eth1
HWADDR=cf:d6:af:48:e8:80
NM_CONTROLLED=no
ONBOOT=yes
TYPE=Ethernet
- USERCTL=no"""),
- 'ifcfg-eth99': textwrap.dedent("""\
+ USERCTL=no"""
+ ),
+ "ifcfg-eth99": textwrap.dedent(
+ """\
BOOTPROTO=dhcp
DEFROUTE=yes
DEVICE=eth99
@@ -904,9 +1075,11 @@ NETWORK_CONFIGS = {
NM_CONTROLLED=no
ONBOOT=yes
TYPE=Ethernet
- USERCTL=no"""),
+ USERCTL=no"""
+ ),
},
- 'yaml': textwrap.dedent("""
+ "yaml": textwrap.dedent(
+ """
version: 1
config:
# Physical interfaces.
@@ -935,10 +1108,20 @@ NETWORK_CONFIGS = {
- 5.6.7.8
search:
- wark.maas
- """),
+ """
+ ),
},
- 'v4_and_v6': {
- 'expected_eni': textwrap.dedent("""\
+ "v4_and_v6": {
+ "expected_networkd": textwrap.dedent(
+ """\
+ [Match]
+ Name=iface0
+ [Network]
+ DHCP=yes
+ """
+ ).rstrip(" "),
+ "expected_eni": textwrap.dedent(
+ """\
auto lo
iface lo inet loopback
@@ -947,22 +1130,28 @@ NETWORK_CONFIGS = {
# control-alias iface0
iface iface0 inet6 dhcp
- """).rstrip(' '),
- 'expected_netplan': textwrap.dedent("""
+ """
+ ).rstrip(" "),
+ "expected_netplan": textwrap.dedent(
+ """
network:
version: 2
ethernets:
iface0:
dhcp4: true
dhcp6: true
- """).rstrip(' '),
- 'expected_sysconfig_opensuse': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "expected_sysconfig_opensuse": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=dhcp
DHCLIENT6_MODE=managed
- STARTMODE=auto""")
+ STARTMODE=auto"""
+ )
},
- 'yaml': textwrap.dedent("""\
+ "yaml": textwrap.dedent(
+ """\
version: 1
config:
- type: 'physical'
@@ -970,10 +1159,25 @@ NETWORK_CONFIGS = {
subnets:
- {'type': 'dhcp4'}
- {'type': 'dhcp6'}
- """).rstrip(' '),
+ """
+ ).rstrip(" "),
},
- 'v4_and_v6_static': {
- 'expected_eni': textwrap.dedent("""\
+ "v4_and_v6_static": {
+ "expected_networkd": textwrap.dedent(
+ """\
+ [Match]
+ Name=iface0
+ [Link]
+ MTUBytes=8999
+ [Network]
+ DHCP=no
+ [Address]
+ Address=192.168.14.2/24
+ Address=2001:1::1/64
+ """
+ ).rstrip(" "),
+ "expected_eni": textwrap.dedent(
+ """\
auto lo
iface lo inet loopback
@@ -986,8 +1190,10 @@ NETWORK_CONFIGS = {
iface iface0 inet6 static
address 2001:1::1/64
mtu 1500
- """).rstrip(' '),
- 'expected_netplan': textwrap.dedent("""
+ """
+ ).rstrip(" "),
+ "expected_netplan": textwrap.dedent(
+ """
network:
version: 2
ethernets:
@@ -997,8 +1203,10 @@ NETWORK_CONFIGS = {
- 2001:1::1/64
ipv6-mtu: 1500
mtu: 9000
- """).rstrip(' '),
- 'yaml': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "yaml": textwrap.dedent(
+ """\
version: 1
config:
- type: 'physical'
@@ -1011,19 +1219,23 @@ NETWORK_CONFIGS = {
- type: static
address: 2001:1::1/64
mtu: 1500
- """).rstrip(' '),
- 'expected_sysconfig_opensuse': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "expected_sysconfig_opensuse": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=static
IPADDR=192.168.14.2
IPADDR6=2001:1::1/64
NETMASK=255.255.255.0
STARTMODE=auto
MTU=9000
- """),
+ """
+ ),
},
- 'expected_sysconfig_rhel': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ "expected_sysconfig_rhel": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=iface0
IPADDR=192.168.14.2
@@ -1038,17 +1250,21 @@ NETWORK_CONFIGS = {
USERCTL=no
MTU=9000
IPV6_MTU=1500
- """),
+ """
+ ),
},
},
- 'v6_and_v4': {
- 'expected_sysconfig_opensuse': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ "v6_and_v4": {
+ "expected_sysconfig_opensuse": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=dhcp
DHCLIENT6_MODE=managed
- STARTMODE=auto""")
+ STARTMODE=auto"""
+ )
},
- 'yaml': textwrap.dedent("""\
+ "yaml": textwrap.dedent(
+ """\
version: 1
config:
- type: 'physical'
@@ -1056,40 +1272,58 @@ NETWORK_CONFIGS = {
subnets:
- type: dhcp6
- type: dhcp4
- """).rstrip(' '),
+ """
+ ).rstrip(" "),
},
- 'dhcpv6_only': {
- 'expected_eni': textwrap.dedent("""\
+ "dhcpv6_only": {
+ "expected_networkd": textwrap.dedent(
+ """\
+ [Match]
+ Name=iface0
+ [Network]
+ DHCP=ipv6
+ """
+ ).rstrip(" "),
+ "expected_eni": textwrap.dedent(
+ """\
auto lo
iface lo inet loopback
auto iface0
iface iface0 inet6 dhcp
- """).rstrip(' '),
- 'expected_netplan': textwrap.dedent("""
+ """
+ ).rstrip(" "),
+ "expected_netplan": textwrap.dedent(
+ """
network:
version: 2
ethernets:
iface0:
dhcp6: true
- """).rstrip(' '),
- 'yaml': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "yaml": textwrap.dedent(
+ """\
version: 1
config:
- type: 'physical'
name: 'iface0'
subnets:
- {'type': 'dhcp6'}
- """).rstrip(' '),
- 'expected_sysconfig_opensuse': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "expected_sysconfig_opensuse": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=dhcp6
DHCLIENT6_MODE=managed
STARTMODE=auto
- """),
+ """
+ ),
},
- 'expected_sysconfig_rhel': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ "expected_sysconfig_rhel": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=iface0
DHCPV6C=yes
@@ -1099,27 +1333,33 @@ NETWORK_CONFIGS = {
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
+ """
+ ),
},
},
- 'dhcpv6_accept_ra': {
- 'expected_eni': textwrap.dedent("""\
+ "dhcpv6_accept_ra": {
+ "expected_eni": textwrap.dedent(
+ """\
auto lo
iface lo inet loopback
auto iface0
iface iface0 inet6 dhcp
accept_ra 1
- """).rstrip(' '),
- 'expected_netplan': textwrap.dedent("""
+ """
+ ).rstrip(" "),
+ "expected_netplan": textwrap.dedent(
+ """
network:
version: 2
ethernets:
iface0:
accept-ra: true
dhcp6: true
- """).rstrip(' '),
- 'yaml_v1': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "yaml_v1": textwrap.dedent(
+ """\
version: 1
config:
- type: 'physical'
@@ -1127,23 +1367,29 @@ NETWORK_CONFIGS = {
subnets:
- {'type': 'dhcp6'}
accept-ra: true
- """).rstrip(' '),
- 'yaml_v2': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "yaml_v2": textwrap.dedent(
+ """\
version: 2
ethernets:
iface0:
dhcp6: true
accept-ra: true
- """).rstrip(' '),
- 'expected_sysconfig_opensuse': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "expected_sysconfig_opensuse": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=dhcp6
DHCLIENT6_MODE=managed
STARTMODE=auto
- """),
+ """
+ ),
},
- 'expected_sysconfig_rhel': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ "expected_sysconfig_rhel": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=iface0
DHCPV6C=yes
@@ -1154,27 +1400,42 @@ NETWORK_CONFIGS = {
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
+ """
+ ),
},
+ "expected_networkd": textwrap.dedent(
+ """\
+ [Match]
+ Name=iface0
+ [Network]
+ DHCP=ipv6
+ IPv6AcceptRA=True
+ """
+ ).rstrip(" "),
},
- 'dhcpv6_reject_ra': {
- 'expected_eni': textwrap.dedent("""\
+ "dhcpv6_reject_ra": {
+ "expected_eni": textwrap.dedent(
+ """\
auto lo
iface lo inet loopback
auto iface0
iface iface0 inet6 dhcp
accept_ra 0
- """).rstrip(' '),
- 'expected_netplan': textwrap.dedent("""
+ """
+ ).rstrip(" "),
+ "expected_netplan": textwrap.dedent(
+ """
network:
version: 2
ethernets:
iface0:
accept-ra: false
dhcp6: true
- """).rstrip(' '),
- 'yaml_v1': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "yaml_v1": textwrap.dedent(
+ """\
version: 1
config:
- type: 'physical'
@@ -1182,23 +1443,29 @@ NETWORK_CONFIGS = {
subnets:
- {'type': 'dhcp6'}
accept-ra: false
- """).rstrip(' '),
- 'yaml_v2': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "yaml_v2": textwrap.dedent(
+ """\
version: 2
ethernets:
iface0:
dhcp6: true
accept-ra: false
- """).rstrip(' '),
- 'expected_sysconfig_opensuse': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "expected_sysconfig_opensuse": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=dhcp6
DHCLIENT6_MODE=managed
STARTMODE=auto
- """),
+ """
+ ),
},
- 'expected_sysconfig_rhel': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ "expected_sysconfig_rhel": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=iface0
DHCPV6C=yes
@@ -1209,42 +1476,61 @@ NETWORK_CONFIGS = {
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
+ """
+ ),
},
+ "expected_networkd": textwrap.dedent(
+ """\
+ [Match]
+ Name=iface0
+ [Network]
+ DHCP=ipv6
+ IPv6AcceptRA=False
+ """
+ ).rstrip(" "),
},
- 'ipv6_slaac': {
- 'expected_eni': textwrap.dedent("""\
+ "ipv6_slaac": {
+ "expected_eni": textwrap.dedent(
+ """\
auto lo
iface lo inet loopback
auto iface0
iface iface0 inet6 auto
dhcp 0
- """).rstrip(' '),
- 'expected_netplan': textwrap.dedent("""
+ """
+ ).rstrip(" "),
+ "expected_netplan": textwrap.dedent(
+ """
network:
version: 2
ethernets:
iface0:
dhcp6: true
- """).rstrip(' '),
- 'yaml': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "yaml": textwrap.dedent(
+ """\
version: 1
config:
- type: 'physical'
name: 'iface0'
subnets:
- {'type': 'ipv6_slaac'}
- """).rstrip(' '),
- 'expected_sysconfig_opensuse': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "expected_sysconfig_opensuse": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=dhcp6
DHCLIENT6_MODE=info
STARTMODE=auto
- """),
+ """
+ ),
},
- 'expected_sysconfig_rhel': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ "expected_sysconfig_rhel": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=iface0
IPV6_AUTOCONF=yes
@@ -1254,11 +1540,13 @@ NETWORK_CONFIGS = {
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
+ """
+ ),
},
},
- 'static6': {
- 'yaml': textwrap.dedent("""\
+ "static6": {
+ "yaml": textwrap.dedent(
+ """\
version: 1
config:
- type: 'physical'
@@ -1267,9 +1555,11 @@ NETWORK_CONFIGS = {
subnets:
- type: 'static6'
address: 2001:1::1/64
- """).rstrip(' '),
- 'expected_sysconfig_rhel': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "expected_sysconfig_rhel": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=iface0
IPV6ADDR=2001:1::1/64
@@ -1281,42 +1571,52 @@ NETWORK_CONFIGS = {
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
+ """
+ ),
},
},
- 'dhcpv6_stateless': {
- 'expected_eni': textwrap.dedent("""\
+ "dhcpv6_stateless": {
+ "expected_eni": textwrap.dedent(
+ """\
auto lo
iface lo inet loopback
auto iface0
iface iface0 inet6 auto
dhcp 1
- """).rstrip(' '),
- 'expected_netplan': textwrap.dedent("""
+ """
+ ).rstrip(" "),
+ "expected_netplan": textwrap.dedent(
+ """
network:
version: 2
ethernets:
iface0:
dhcp6: true
- """).rstrip(' '),
- 'yaml': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "yaml": textwrap.dedent(
+ """\
version: 1
config:
- type: 'physical'
name: 'iface0'
subnets:
- {'type': 'ipv6_dhcpv6-stateless'}
- """).rstrip(' '),
- 'expected_sysconfig_opensuse': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "expected_sysconfig_opensuse": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=dhcp6
DHCLIENT6_MODE=info
STARTMODE=auto
- """),
+ """
+ ),
},
- 'expected_sysconfig_rhel': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ "expected_sysconfig_rhel": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=iface0
DHCPV6C=yes
@@ -1328,26 +1628,32 @@ NETWORK_CONFIGS = {
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
+ """
+ ),
},
},
- 'dhcpv6_stateful': {
- 'expected_eni': textwrap.dedent("""\
+ "dhcpv6_stateful": {
+ "expected_eni": textwrap.dedent(
+ """\
auto lo
iface lo inet loopback
auto iface0
iface iface0 inet6 dhcp
- """).rstrip(' '),
- 'expected_netplan': textwrap.dedent("""
+ """
+ ).rstrip(" "),
+ "expected_netplan": textwrap.dedent(
+ """
network:
version: 2
ethernets:
iface0:
accept-ra: true
dhcp6: true
- """).rstrip(' '),
- 'yaml': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "yaml": textwrap.dedent(
+ """\
version: 1
config:
- type: 'physical'
@@ -1355,95 +1661,118 @@ NETWORK_CONFIGS = {
subnets:
- {'type': 'ipv6_dhcpv6-stateful'}
accept-ra: true
- """).rstrip(' '),
- 'expected_sysconfig_opensuse': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "expected_sysconfig_opensuse": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=dhcp6
DHCLIENT6_MODE=managed
STARTMODE=auto
- """),
+ """
+ ),
},
- 'expected_sysconfig_rhel': {
- 'ifcfg-iface0': textwrap.dedent("""\
- BOOTPROTO=none
+ "expected_sysconfig_rhel": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
+ BOOTPROTO=dhcp
DEVICE=iface0
DHCPV6C=yes
IPV6INIT=yes
+ IPV6_AUTOCONF=no
IPV6_FORCE_ACCEPT_RA=yes
DEVICE=iface0
NM_CONTROLLED=no
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
+ """
+ ),
},
},
- 'wakeonlan_disabled': {
- 'expected_eni': textwrap.dedent("""\
+ "wakeonlan_disabled": {
+ "expected_eni": textwrap.dedent(
+ """\
auto lo
iface lo inet loopback
auto iface0
iface iface0 inet dhcp
- """).rstrip(' '),
- 'expected_netplan': textwrap.dedent("""
+ """
+ ).rstrip(" "),
+ "expected_netplan": textwrap.dedent(
+ """
network:
ethernets:
iface0:
dhcp4: true
wakeonlan: false
version: 2
- """),
- 'expected_sysconfig_opensuse': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ """
+ ),
+ "expected_sysconfig_opensuse": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=dhcp4
STARTMODE=auto
- """),
+ """
+ ),
},
- 'expected_sysconfig_rhel': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ "expected_sysconfig_rhel": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=dhcp
DEVICE=iface0
NM_CONTROLLED=no
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
+ """
+ ),
},
- 'yaml_v2': textwrap.dedent("""\
+ "yaml_v2": textwrap.dedent(
+ """\
version: 2
ethernets:
iface0:
dhcp4: true
wakeonlan: false
- """).rstrip(' '),
+ """
+ ).rstrip(" "),
},
- 'wakeonlan_enabled': {
- 'expected_eni': textwrap.dedent("""\
+ "wakeonlan_enabled": {
+ "expected_eni": textwrap.dedent(
+ """\
auto lo
iface lo inet loopback
auto iface0
iface iface0 inet dhcp
ethernet-wol g
- """).rstrip(' '),
- 'expected_netplan': textwrap.dedent("""
+ """
+ ).rstrip(" "),
+ "expected_netplan": textwrap.dedent(
+ """
network:
ethernets:
iface0:
dhcp4: true
wakeonlan: true
version: 2
- """),
- 'expected_sysconfig_opensuse': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ """
+ ),
+ "expected_sysconfig_opensuse": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=dhcp4
ETHTOOL_OPTS="wol g"
STARTMODE=auto
- """),
+ """
+ ),
},
- 'expected_sysconfig_rhel': {
- 'ifcfg-iface0': textwrap.dedent("""\
+ "expected_sysconfig_rhel": {
+ "ifcfg-iface0": textwrap.dedent(
+ """\
BOOTPROTO=dhcp
DEVICE=iface0
ETHTOOL_OPTS="wol g"
@@ -1451,18 +1780,21 @@ NETWORK_CONFIGS = {
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
+ """
+ ),
},
- 'yaml_v2': textwrap.dedent("""\
+ "yaml_v2": textwrap.dedent(
+ """\
version: 2
ethernets:
iface0:
dhcp4: true
wakeonlan: true
- """).rstrip(' '),
+ """
+ ).rstrip(" "),
},
- 'all': {
- 'expected_eni': ("""\
+ "all": {
+ "expected_eni": """\
auto lo
iface lo inet loopback
dns-nameservers 8.8.8.8 4.4.4.4 8.8.4.4
@@ -1552,8 +1884,9 @@ iface eth0.101 inet static
post-up route add -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
-"""),
- 'expected_netplan': textwrap.dedent("""
+""",
+ "expected_netplan": textwrap.dedent(
+ """
network:
version: 2
ethernets:
@@ -1649,25 +1982,31 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
- barley.maas
- sacchromyces.maas
- brettanomyces.maas
- """).rstrip(' '),
- 'expected_sysconfig_opensuse': {
- 'ifcfg-bond0': textwrap.dedent("""\
+ """
+ ).rstrip(" "),
+ "expected_sysconfig_opensuse": {
+ "ifcfg-bond0": textwrap.dedent(
+ """\
BONDING_MASTER=yes
- BONDING_OPTS="mode=active-backup """
- """xmit_hash_policy=layer3+4 """
- """miimon=100"
+ BONDING_MODULE_OPTS="mode=active-backup """
+ """xmit_hash_policy=layer3+4 """
+ """miimon=100"
BONDING_SLAVE_0=eth1
BONDING_SLAVE_1=eth2
BOOTPROTO=dhcp6
DHCLIENT6_MODE=managed
LLADDR=aa:bb:cc:dd:ee:ff
- STARTMODE=auto"""),
- 'ifcfg-bond0.200': textwrap.dedent("""\
+ STARTMODE=auto"""
+ ),
+ "ifcfg-bond0.200": textwrap.dedent(
+ """\
BOOTPROTO=dhcp4
ETHERDEVICE=bond0
STARTMODE=auto
- VLAN_ID=200"""),
- 'ifcfg-br0': textwrap.dedent("""\
+ VLAN_ID=200"""
+ ),
+ "ifcfg-br0": textwrap.dedent(
+ """\
BRIDGE_AGEINGTIME=250
BOOTPROTO=static
IPADDR=192.168.14.2
@@ -1677,12 +2016,16 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
BRIDGE_PRIORITY=22
BRIDGE_PORTS='eth3 eth4'
STARTMODE=auto
- BRIDGE_STP=off"""),
- 'ifcfg-eth0': textwrap.dedent("""\
+ BRIDGE_STP=off"""
+ ),
+ "ifcfg-eth0": textwrap.dedent(
+ """\
BOOTPROTO=static
LLADDR=c0:d6:9f:2c:e8:80
- STARTMODE=auto"""),
- 'ifcfg-eth0.101': textwrap.dedent("""\
+ STARTMODE=auto"""
+ ),
+ "ifcfg-eth0.101": textwrap.dedent(
+ """\
BOOTPROTO=static
IPADDR=192.168.0.2
IPADDR1=192.168.2.10
@@ -1691,44 +2034,58 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
NETMASK1=255.255.255.0
ETHERDEVICE=eth0
STARTMODE=auto
- VLAN_ID=101"""),
- 'ifcfg-eth1': textwrap.dedent("""\
+ VLAN_ID=101"""
+ ),
+ "ifcfg-eth1": textwrap.dedent(
+ """\
BOOTPROTO=none
LLADDR=aa:d6:9f:2c:e8:80
- STARTMODE=hotplug"""),
- 'ifcfg-eth2': textwrap.dedent("""\
+ STARTMODE=hotplug"""
+ ),
+ "ifcfg-eth2": textwrap.dedent(
+ """\
BOOTPROTO=none
LLADDR=c0:bb:9f:2c:e8:80
- STARTMODE=hotplug"""),
- 'ifcfg-eth3': textwrap.dedent("""\
+ STARTMODE=hotplug"""
+ ),
+ "ifcfg-eth3": textwrap.dedent(
+ """\
BOOTPROTO=static
BRIDGE=yes
LLADDR=66:bb:9f:2c:e8:80
- STARTMODE=auto"""),
- 'ifcfg-eth4': textwrap.dedent("""\
+ STARTMODE=auto"""
+ ),
+ "ifcfg-eth4": textwrap.dedent(
+ """\
BOOTPROTO=static
BRIDGE=yes
LLADDR=98:bb:9f:2c:e8:80
- STARTMODE=auto"""),
- 'ifcfg-eth5': textwrap.dedent("""\
+ STARTMODE=auto"""
+ ),
+ "ifcfg-eth5": textwrap.dedent(
+ """\
BOOTPROTO=dhcp
LLADDR=98:bb:9f:2c:e8:8a
- STARTMODE=manual"""),
- 'ifcfg-ib0': textwrap.dedent("""\
+ STARTMODE=manual"""
+ ),
+ "ifcfg-ib0": textwrap.dedent(
+ """\
BOOTPROTO=static
LLADDR=a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1
IPADDR=192.168.200.7
MTU=9000
NETMASK=255.255.255.0
STARTMODE=auto
- TYPE=InfiniBand"""),
+ TYPE=InfiniBand"""
+ ),
},
- 'expected_sysconfig_rhel': {
- 'ifcfg-bond0': textwrap.dedent("""\
+ "expected_sysconfig_rhel": {
+ "ifcfg-bond0": textwrap.dedent(
+ """\
BONDING_MASTER=yes
BONDING_OPTS="mode=active-backup """
- """xmit_hash_policy=layer3+4 """
- """miimon=100"
+ """xmit_hash_policy=layer3+4 """
+ """miimon=100"
BONDING_SLAVE0=eth1
BONDING_SLAVE1=eth2
BOOTPROTO=none
@@ -1739,8 +2096,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
NM_CONTROLLED=no
ONBOOT=yes
TYPE=Bond
- USERCTL=no"""),
- 'ifcfg-bond0.200': textwrap.dedent("""\
+ USERCTL=no"""
+ ),
+ "ifcfg-bond0.200": textwrap.dedent(
+ """\
BOOTPROTO=dhcp
DEVICE=bond0.200
DHCLIENT_SET_DEFAULT_ROUTE=no
@@ -1748,8 +2107,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
ONBOOT=yes
PHYSDEV=bond0
USERCTL=no
- VLAN=yes"""),
- 'ifcfg-br0': textwrap.dedent("""\
+ VLAN=yes"""
+ ),
+ "ifcfg-br0": textwrap.dedent(
+ """\
AGEING=250
BOOTPROTO=none
DEFROUTE=yes
@@ -1767,16 +2128,20 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
PRIO=22
STP=no
TYPE=Bridge
- USERCTL=no"""),
- 'ifcfg-eth0': textwrap.dedent("""\
+ USERCTL=no"""
+ ),
+ "ifcfg-eth0": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=eth0
HWADDR=c0:d6:9f:2c:e8:80
NM_CONTROLLED=no
ONBOOT=yes
TYPE=Ethernet
- USERCTL=no"""),
- 'ifcfg-eth0.101': textwrap.dedent("""\
+ USERCTL=no"""
+ ),
+ "ifcfg-eth0.101": textwrap.dedent(
+ """\
BOOTPROTO=none
DEFROUTE=yes
DEVICE=eth0.101
@@ -1793,8 +2158,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
ONBOOT=yes
PHYSDEV=eth0
USERCTL=no
- VLAN=yes"""),
- 'ifcfg-eth1': textwrap.dedent("""\
+ VLAN=yes"""
+ ),
+ "ifcfg-eth1": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=eth1
HWADDR=aa:d6:9f:2c:e8:80
@@ -1803,8 +2170,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
ONBOOT=yes
SLAVE=yes
TYPE=Ethernet
- USERCTL=no"""),
- 'ifcfg-eth2': textwrap.dedent("""\
+ USERCTL=no"""
+ ),
+ "ifcfg-eth2": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=eth2
HWADDR=c0:bb:9f:2c:e8:80
@@ -1813,8 +2182,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
ONBOOT=yes
SLAVE=yes
TYPE=Ethernet
- USERCTL=no"""),
- 'ifcfg-eth3': textwrap.dedent("""\
+ USERCTL=no"""
+ ),
+ "ifcfg-eth3": textwrap.dedent(
+ """\
BOOTPROTO=none
BRIDGE=br0
DEVICE=eth3
@@ -1822,8 +2193,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
NM_CONTROLLED=no
ONBOOT=yes
TYPE=Ethernet
- USERCTL=no"""),
- 'ifcfg-eth4': textwrap.dedent("""\
+ USERCTL=no"""
+ ),
+ "ifcfg-eth4": textwrap.dedent(
+ """\
BOOTPROTO=none
BRIDGE=br0
DEVICE=eth4
@@ -1831,8 +2204,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
NM_CONTROLLED=no
ONBOOT=yes
TYPE=Ethernet
- USERCTL=no"""),
- 'ifcfg-eth5': textwrap.dedent("""\
+ USERCTL=no"""
+ ),
+ "ifcfg-eth5": textwrap.dedent(
+ """\
BOOTPROTO=dhcp
DEVICE=eth5
DHCLIENT_SET_DEFAULT_ROUTE=no
@@ -1840,8 +2215,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
NM_CONTROLLED=no
ONBOOT=no
TYPE=Ethernet
- USERCTL=no"""),
- 'ifcfg-ib0': textwrap.dedent("""\
+ USERCTL=no"""
+ ),
+ "ifcfg-ib0": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=ib0
HWADDR=a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1
@@ -1851,9 +2228,11 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
NM_CONTROLLED=no
ONBOOT=yes
TYPE=InfiniBand
- USERCTL=no"""),
+ USERCTL=no"""
+ ),
},
- 'yaml': textwrap.dedent("""
+ "yaml": textwrap.dedent(
+ """
version: 1
config:
# Physical interfaces.
@@ -1996,10 +2375,12 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
destination: 10.0.0.0/8
gateway: 11.0.0.1
metric: 3
- """).lstrip(),
+ """
+ ).lstrip(),
},
- 'bond': {
- 'yaml': textwrap.dedent("""
+ "bond": {
+ "yaml": textwrap.dedent(
+ """
version: 1
config:
- type: physical
@@ -2040,13 +2421,15 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
routes:
- gateway: 2001:67c:1562:1
network: 2001:67c:1
- netmask: ffff:ffff:0
+ netmask: "ffff:ffff::"
- gateway: 3001:67c:1562:1
network: 3001:67c:1
- netmask: ffff:ffff:0
+ netmask: "ffff:ffff::"
metric: 10000
- """),
- 'expected_netplan': textwrap.dedent("""
+ """
+ ),
+ "expected_netplan": textwrap.dedent(
+ """
network:
version: 2
ethernets:
@@ -2088,8 +2471,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
- metric: 10000
to: 3001:67c:1/32
via: 3001:67c:1562:1
- """),
- 'expected_eni': textwrap.dedent("""\
+ """
+ ),
+ "expected_eni": textwrap.dedent(
+ """\
auto lo
iface lo inet loopback
@@ -2151,8 +2536,10 @@ iface bond0 inet6 static
|| true
pre-down route del -A inet6 3001:67c:1/32 gw 3001:67c:1562:1 metric 10000 \
|| true
- """),
- 'yaml-v2': textwrap.dedent("""
+ """
+ ),
+ "yaml-v2": textwrap.dedent(
+ """
version: 2
ethernets:
eth0:
@@ -2192,8 +2579,10 @@ iface bond0 inet6 static
- metric: 10000
to: 3001:67c:1562:8007::1/64
via: 3001:67c:1562:8007::aac:40b2
- """),
- 'expected_netplan-v2': textwrap.dedent("""
+ """
+ ),
+ "expected_netplan-v2": textwrap.dedent(
+ """
network:
bonds:
bond0:
@@ -2234,17 +2623,18 @@ iface bond0 inet6 static
macaddress: aa:bb:cc:dd:e8:01
set-name: vf0
version: 2
- """),
-
- 'expected_sysconfig_opensuse': {
- 'ifcfg-bond0': textwrap.dedent("""\
+ """
+ ),
+ "expected_sysconfig_opensuse": {
+ "ifcfg-bond0": textwrap.dedent(
+ """\
BONDING_MASTER=yes
- BONDING_OPTS="mode=active-backup xmit_hash_policy=layer3+4 """
- """miimon=100 num_grat_arp=5 """
- """downdelay=10 updelay=20 """
- """fail_over_mac=active """
- """primary=bond0s0 """
- """primary_reselect=always"
+ BONDING_MODULE_OPTS="mode=active-backup xmit_hash_policy=layer3+4 """
+ """miimon=100 num_grat_arp=5 """
+ """downdelay=10 updelay=20 """
+ """fail_over_mac=active """
+ """primary=bond0s0 """
+ """primary_reselect=always"
BONDING_SLAVE_0=bond0s0
BONDING_SLAVE_1=bond0s1
BOOTPROTO=static
@@ -2256,27 +2646,33 @@ iface bond0 inet6 static
NETMASK=255.255.255.0
NETMASK1=255.255.255.0
STARTMODE=auto
- """),
- 'ifcfg-bond0s0': textwrap.dedent("""\
+ """
+ ),
+ "ifcfg-bond0s0": textwrap.dedent(
+ """\
BOOTPROTO=none
LLADDR=aa:bb:cc:dd:e8:00
STARTMODE=hotplug
- """),
- 'ifcfg-bond0s1': textwrap.dedent("""\
+ """
+ ),
+ "ifcfg-bond0s1": textwrap.dedent(
+ """\
BOOTPROTO=none
LLADDR=aa:bb:cc:dd:e8:01
STARTMODE=hotplug
- """),
+ """
+ ),
},
- 'expected_sysconfig_rhel': {
- 'ifcfg-bond0': textwrap.dedent("""\
+ "expected_sysconfig_rhel": {
+ "ifcfg-bond0": textwrap.dedent(
+ """\
BONDING_MASTER=yes
BONDING_OPTS="mode=active-backup xmit_hash_policy=layer3+4 """
- """miimon=100 num_grat_arp=5 """
- """downdelay=10 updelay=20 """
- """fail_over_mac=active """
- """primary=bond0s0 """
- """primary_reselect=always"
+ """miimon=100 num_grat_arp=5 """
+ """downdelay=10 updelay=20 """
+ """fail_over_mac=active """
+ """primary=bond0s0 """
+ """primary_reselect=always"
BONDING_SLAVE0=bond0s0
BONDING_SLAVE1=bond0s1
BOOTPROTO=none
@@ -2297,8 +2693,10 @@ iface bond0 inet6 static
ONBOOT=yes
TYPE=Bond
USERCTL=no
- """),
- 'ifcfg-bond0s0': textwrap.dedent("""\
+ """
+ ),
+ "ifcfg-bond0s0": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=bond0s0
HWADDR=aa:bb:cc:dd:e8:00
@@ -2308,19 +2706,25 @@ iface bond0 inet6 static
SLAVE=yes
TYPE=Ethernet
USERCTL=no
- """),
- 'route6-bond0': textwrap.dedent("""\
+ """
+ ),
+ "route6-bond0": textwrap.dedent(
+ """\
# Created by cloud-init on instance boot automatically, do not edit.
#
- 2001:67c:1/ffff:ffff:0 via 2001:67c:1562:1 dev bond0
- 3001:67c:1/ffff:ffff:0 via 3001:67c:1562:1 metric 10000 dev bond0
- """),
- 'route-bond0': textwrap.dedent("""\
+ 2001:67c:1/32 via 2001:67c:1562:1 dev bond0
+ 3001:67c:1/32 via 3001:67c:1562:1 metric 10000 dev bond0
+ """
+ ),
+ "route-bond0": textwrap.dedent(
+ """\
ADDRESS0=10.1.3.0
GATEWAY0=192.168.0.3
NETMASK0=255.255.255.0
- """),
- 'ifcfg-bond0s1': textwrap.dedent("""\
+ """
+ ),
+ "ifcfg-bond0s1": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=bond0s1
HWADDR=aa:bb:cc:dd:e8:01
@@ -2330,11 +2734,13 @@ iface bond0 inet6 static
SLAVE=yes
TYPE=Ethernet
USERCTL=no
- """),
+ """
+ ),
},
},
- 'vlan': {
- 'yaml': textwrap.dedent("""
+ "vlan": {
+ "yaml": textwrap.dedent(
+ """
version: 1
config:
- type: physical
@@ -2357,14 +2763,18 @@ iface bond0 inet6 static
- gateway: 2001:1::1
netmask: '::'
network: '::'
- """),
- 'expected_sysconfig_opensuse': {
+ """
+ ),
+ "expected_sysconfig_opensuse": {
# TODO RJS: unknown proper BOOTPROTO setting ask Marius
- 'ifcfg-en0': textwrap.dedent("""\
+ "ifcfg-en0": textwrap.dedent(
+ """\
BOOTPROTO=static
LLADDR=aa:bb:cc:dd:e8:00
- STARTMODE=auto"""),
- 'ifcfg-en0.99': textwrap.dedent("""\
+ STARTMODE=auto"""
+ ),
+ "ifcfg-en0.99": textwrap.dedent(
+ """\
BOOTPROTO=static
IPADDR=192.168.2.2
IPADDR1=192.168.1.2
@@ -2375,18 +2785,22 @@ iface bond0 inet6 static
STARTMODE=auto
ETHERDEVICE=en0
VLAN_ID=99
- """),
+ """
+ ),
},
- 'expected_sysconfig_rhel': {
- 'ifcfg-en0': textwrap.dedent("""\
+ "expected_sysconfig_rhel": {
+ "ifcfg-en0": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=en0
HWADDR=aa:bb:cc:dd:e8:00
NM_CONTROLLED=no
ONBOOT=yes
TYPE=Ethernet
- USERCTL=no"""),
- 'ifcfg-en0.99': textwrap.dedent("""\
+ USERCTL=no"""
+ ),
+ "ifcfg-en0.99": textwrap.dedent(
+ """\
BOOTPROTO=none
DEFROUTE=yes
DEVICE=en0.99
@@ -2405,11 +2819,13 @@ iface bond0 inet6 static
ONBOOT=yes
PHYSDEV=en0
USERCTL=no
- VLAN=yes"""),
+ VLAN=yes"""
+ ),
},
},
- 'bridge': {
- 'yaml': textwrap.dedent("""
+ "bridge": {
+ "yaml": textwrap.dedent(
+ """
version: 1
config:
- type: physical
@@ -2434,9 +2850,11 @@ iface bond0 inet6 static
bridge_bridgeprio: 22
subnets:
- type: static
- address: 192.168.2.2/24"""),
- 'expected_sysconfig_opensuse': {
- 'ifcfg-br0': textwrap.dedent("""\
+ address: 192.168.2.2/24"""
+ ),
+ "expected_sysconfig_opensuse": {
+ "ifcfg-br0": textwrap.dedent(
+ """\
BOOTPROTO=static
IPADDR=192.168.2.2
NETMASK=255.255.255.0
@@ -2444,24 +2862,30 @@ iface bond0 inet6 static
BRIDGE_STP=off
BRIDGE_PRIORITY=22
BRIDGE_PORTS='eth0 eth1'
- """),
- 'ifcfg-eth0': textwrap.dedent("""\
+ """
+ ),
+ "ifcfg-eth0": textwrap.dedent(
+ """\
BOOTPROTO=static
BRIDGE=yes
LLADDR=52:54:00:12:34:00
IPADDR6=2001:1::100/96
STARTMODE=auto
- """),
- 'ifcfg-eth1': textwrap.dedent("""\
+ """
+ ),
+ "ifcfg-eth1": textwrap.dedent(
+ """\
BOOTPROTO=static
BRIDGE=yes
LLADDR=52:54:00:12:34:01
IPADDR6=2001:1::101/96
STARTMODE=auto
- """),
+ """
+ ),
},
- 'expected_sysconfig_rhel': {
- 'ifcfg-br0': textwrap.dedent("""\
+ "expected_sysconfig_rhel": {
+ "ifcfg-br0": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=br0
IPADDR=192.168.2.2
@@ -2472,8 +2896,10 @@ iface bond0 inet6 static
STP=no
TYPE=Bridge
USERCTL=no
- """),
- 'ifcfg-eth0': textwrap.dedent("""\
+ """
+ ),
+ "ifcfg-eth0": textwrap.dedent(
+ """\
BOOTPROTO=none
BRIDGE=br0
DEVICE=eth0
@@ -2486,8 +2912,10 @@ iface bond0 inet6 static
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
- 'ifcfg-eth1': textwrap.dedent("""\
+ """
+ ),
+ "ifcfg-eth1": textwrap.dedent(
+ """\
BOOTPROTO=none
BRIDGE=br0
DEVICE=eth1
@@ -2500,11 +2928,13 @@ iface bond0 inet6 static
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
+ """
+ ),
},
},
- 'manual': {
- 'yaml': textwrap.dedent("""
+ "manual": {
+ "yaml": textwrap.dedent(
+ """
version: 1
config:
- type: physical
@@ -2526,8 +2956,10 @@ iface bond0 inet6 static
subnets:
- type: manual
control: manual
- """),
- 'expected_eni': textwrap.dedent("""\
+ """
+ ),
+ "expected_eni": textwrap.dedent(
+ """\
auto lo
iface lo inet loopback
@@ -2541,8 +2973,10 @@ iface bond0 inet6 static
# control-manual eth2
iface eth2 inet manual
- """),
- 'expected_netplan': textwrap.dedent("""\
+ """
+ ),
+ "expected_netplan": textwrap.dedent(
+ """\
network:
version: 2
@@ -2562,29 +2996,37 @@ iface bond0 inet6 static
match:
macaddress: 52:54:00:12:34:ff
set-name: eth2
- """),
- 'expected_sysconfig_opensuse': {
- 'ifcfg-eth0': textwrap.dedent("""\
+ """
+ ),
+ "expected_sysconfig_opensuse": {
+ "ifcfg-eth0": textwrap.dedent(
+ """\
BOOTPROTO=static
LLADDR=52:54:00:12:34:00
IPADDR=192.168.1.2
NETMASK=255.255.255.0
STARTMODE=manual
- """),
- 'ifcfg-eth1': textwrap.dedent("""\
+ """
+ ),
+ "ifcfg-eth1": textwrap.dedent(
+ """\
BOOTPROTO=static
LLADDR=52:54:00:12:34:aa
MTU=1480
STARTMODE=auto
- """),
- 'ifcfg-eth2': textwrap.dedent("""\
+ """
+ ),
+ "ifcfg-eth2": textwrap.dedent(
+ """\
BOOTPROTO=static
LLADDR=52:54:00:12:34:ff
STARTMODE=manual
- """),
+ """
+ ),
},
- 'expected_sysconfig_rhel': {
- 'ifcfg-eth0': textwrap.dedent("""\
+ "expected_sysconfig_rhel": {
+ "ifcfg-eth0": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=eth0
HWADDR=52:54:00:12:34:00
@@ -2594,8 +3036,10 @@ iface bond0 inet6 static
ONBOOT=no
TYPE=Ethernet
USERCTL=no
- """),
- 'ifcfg-eth1': textwrap.dedent("""\
+ """
+ ),
+ "ifcfg-eth1": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=eth1
HWADDR=52:54:00:12:34:aa
@@ -2604,8 +3048,10 @@ iface bond0 inet6 static
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
- 'ifcfg-eth2': textwrap.dedent("""\
+ """
+ ),
+ "ifcfg-eth2": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=eth2
HWADDR=52:54:00:12:34:ff
@@ -2613,51 +3059,85 @@ iface bond0 inet6 static
ONBOOT=no
TYPE=Ethernet
USERCTL=no
- """),
+ """
+ ),
},
},
}
CONFIG_V1_EXPLICIT_LOOPBACK = {
- 'version': 1,
- 'config': [{'name': 'eth0', 'type': 'physical',
- 'subnets': [{'control': 'auto', 'type': 'dhcp'}]},
- {'name': 'lo', 'type': 'loopback',
- 'subnets': [{'control': 'auto', 'type': 'loopback'}]},
- ]}
+ "version": 1,
+ "config": [
+ {
+ "name": "eth0",
+ "type": "physical",
+ "subnets": [{"control": "auto", "type": "dhcp"}],
+ },
+ {
+ "name": "lo",
+ "type": "loopback",
+ "subnets": [{"control": "auto", "type": "loopback"}],
+ },
+ ],
+}
CONFIG_V1_SIMPLE_SUBNET = {
- 'version': 1,
- 'config': [{'mac_address': '52:54:00:12:34:00',
- 'name': 'interface0',
- 'subnets': [{'address': '10.0.2.15',
- 'gateway': '10.0.2.2',
- 'netmask': '255.255.255.0',
- 'type': 'static'}],
- 'type': 'physical'}]}
+ "version": 1,
+ "config": [
+ {
+ "mac_address": "52:54:00:12:34:00",
+ "name": "interface0",
+ "subnets": [
+ {
+ "address": "10.0.2.15",
+ "gateway": "10.0.2.2",
+ "netmask": "255.255.255.0",
+ "type": "static",
+ }
+ ],
+ "type": "physical",
+ }
+ ],
+}
CONFIG_V1_MULTI_IFACE = {
- 'version': 1,
- 'config': [{'type': 'physical',
- 'mtu': 1500,
- 'subnets': [{'type': 'static',
- 'netmask': '255.255.240.0',
- 'routes': [{'netmask': '0.0.0.0',
- 'network': '0.0.0.0',
- 'gateway': '51.68.80.1'}],
- 'address': '51.68.89.122',
- 'ipv4': True}],
- 'mac_address': 'fa:16:3e:25:b4:59',
- 'name': 'eth0'},
- {'type': 'physical',
- 'mtu': 9000,
- 'subnets': [{'type': 'dhcp4'}],
- 'mac_address': 'fa:16:3e:b1:ca:29', 'name': 'eth1'}]}
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "mtu": 1500,
+ "subnets": [
+ {
+ "type": "static",
+ "netmask": "255.255.240.0",
+ "routes": [
+ {
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": "51.68.80.1",
+ }
+ ],
+ "address": "51.68.89.122",
+ "ipv4": True,
+ }
+ ],
+ "mac_address": "fa:16:3e:25:b4:59",
+ "name": "eth0",
+ },
+ {
+ "type": "physical",
+ "mtu": 9000,
+ "subnets": [{"type": "dhcp4"}],
+ "mac_address": "fa:16:3e:b1:ca:29",
+ "name": "eth1",
+ },
+ ],
+}
DEFAULT_DEV_ATTRS = {
- 'eth1000': {
+ "eth1000": {
"bridge": False,
"carrier": False,
"dormant": False,
@@ -2670,16 +3150,26 @@ DEFAULT_DEV_ATTRS = {
}
-def _setup_test(tmp_dir, mock_get_devicelist, mock_read_sys_net,
- mock_sys_dev_path, dev_attrs=None):
+def _setup_test(
+ tmp_dir,
+ mock_get_devicelist,
+ mock_read_sys_net,
+ mock_sys_dev_path,
+ dev_attrs=None,
+):
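+ """Point the cloudinit.net mocks at a fake sysfs-style tree rooted at tmp_dir."""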
if not dev_attrs:
dev_attrs = DEFAULT_DEV_ATTRS
mock_get_devicelist.return_value = dev_attrs.keys()
- def fake_read(devname, path, translate=None,
- on_enoent=None, on_keyerror=None,
- on_einval=None):
+ def fake_read(
+ devname,
+ path,
+ translate=None,
+ on_enoent=None,
+ on_keyerror=None,
+ on_einval=None,
+ ):
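+ # look up the recorded sysfs attribute for this device and path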
return dev_attrs[devname][path]
mock_read_sys_net.side_effect = fake_read
@@ -2689,99 +3179,137 @@ def _setup_test(tmp_dir, mock_get_devicelist, mock_read_sys_net,
for dev in dev_attrs:
os.makedirs(os.path.join(tmp_dir, dev))
- with open(os.path.join(tmp_dir, dev, 'operstate'), 'w') as fh:
- fh.write(dev_attrs[dev]['operstate'])
+ with open(os.path.join(tmp_dir, dev, "operstate"), "w") as fh:
+ fh.write(dev_attrs[dev]["operstate"])
os.makedirs(os.path.join(tmp_dir, dev, "device"))
- for key in ['device/driver']:
+ for key in ["device/driver"]:
if key in dev_attrs[dev] and dev_attrs[dev][key]:
target = dev_attrs[dev][key]
link = os.path.join(tmp_dir, dev, key)
- print('symlink %s -> %s' % (link, target))
+ print("symlink %s -> %s" % (link, target))
os.symlink(target, link)
mock_sys_dev_path.side_effect = sys_dev_path
class TestGenerateFallbackConfig(CiTestCase):
-
def setUp(self):
super(TestGenerateFallbackConfig, self).setUp()
self.add_patch(
- "cloudinit.util.get_cmdline", "m_get_cmdline",
- return_value="root=/dev/sda1")
+ "cloudinit.util.get_cmdline",
+ "m_get_cmdline",
+ return_value="root=/dev/sda1",
+ )
@mock.patch("cloudinit.net.sys_dev_path")
@mock.patch("cloudinit.net.read_sys_net")
@mock.patch("cloudinit.net.get_devicelist")
- def test_device_driver_v2(self, mock_get_devicelist, mock_read_sys_net,
- mock_sys_dev_path):
+ def test_device_driver_v2(
+ self, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path
+ ):
"""Network configuration for generate_fallback_config is version 2."""
devices = {
- 'eth0': {
- 'bridge': False, 'carrier': False, 'dormant': False,
- 'operstate': 'down', 'address': '00:11:22:33:44:55',
- 'device/driver': 'hv_netsvc', 'device/device': '0x3',
- 'name_assign_type': '4'},
- 'eth1': {
- 'bridge': False, 'carrier': False, 'dormant': False,
- 'operstate': 'down', 'address': '00:11:22:33:44:55',
- 'device/driver': 'mlx4_core', 'device/device': '0x7',
- 'name_assign_type': '4'},
-
+ "eth0": {
+ "bridge": False,
+ "carrier": False,
+ "dormant": False,
+ "operstate": "down",
+ "address": "00:11:22:33:44:55",
+ "device/driver": "hv_netsvc",
+ "device/device": "0x3",
+ "name_assign_type": "4",
+ },
+ "eth1": {
+ "bridge": False,
+ "carrier": False,
+ "dormant": False,
+ "operstate": "down",
+ "address": "00:11:22:33:44:55",
+ "device/driver": "mlx4_core",
+ "device/device": "0x7",
+ "name_assign_type": "4",
+ },
}
tmp_dir = self.tmp_dir()
- _setup_test(tmp_dir, mock_get_devicelist,
- mock_read_sys_net, mock_sys_dev_path,
- dev_attrs=devices)
+ _setup_test(
+ tmp_dir,
+ mock_get_devicelist,
+ mock_read_sys_net,
+ mock_sys_dev_path,
+ dev_attrs=devices,
+ )
network_cfg = net.generate_fallback_config(config_driver=True)
expected = {
- 'ethernets': {'eth0': {'dhcp4': True, 'set-name': 'eth0',
- 'match': {'macaddress': '00:11:22:33:44:55',
- 'driver': 'hv_netsvc'}}},
- 'version': 2}
+ "ethernets": {
+ "eth0": {
+ "dhcp4": True,
+ "set-name": "eth0",
+ "match": {
+ "macaddress": "00:11:22:33:44:55",
+ "driver": "hv_netsvc",
+ },
+ }
+ },
+ "version": 2,
+ }
self.assertEqual(expected, network_cfg)
@mock.patch("cloudinit.net.sys_dev_path")
@mock.patch("cloudinit.net.read_sys_net")
@mock.patch("cloudinit.net.get_devicelist")
- def test_device_driver(self, mock_get_devicelist, mock_read_sys_net,
- mock_sys_dev_path):
+ def test_device_driver(
+ self, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path
+ ):
devices = {
- 'eth0': {
- 'bridge': False, 'carrier': False, 'dormant': False,
- 'operstate': 'down', 'address': '00:11:22:33:44:55',
- 'device/driver': 'hv_netsvc', 'device/device': '0x3',
- 'name_assign_type': '4'},
- 'eth1': {
- 'bridge': False, 'carrier': False, 'dormant': False,
- 'operstate': 'down', 'address': '00:11:22:33:44:55',
- 'device/driver': 'mlx4_core', 'device/device': '0x7',
- 'name_assign_type': '4'},
-
+ "eth0": {
+ "bridge": False,
+ "carrier": False,
+ "dormant": False,
+ "operstate": "down",
+ "address": "00:11:22:33:44:55",
+ "device/driver": "hv_netsvc",
+ "device/device": "0x3",
+ "name_assign_type": "4",
+ },
+ "eth1": {
+ "bridge": False,
+ "carrier": False,
+ "dormant": False,
+ "operstate": "down",
+ "address": "00:11:22:33:44:55",
+ "device/driver": "mlx4_core",
+ "device/device": "0x7",
+ "name_assign_type": "4",
+ },
}
tmp_dir = self.tmp_dir()
- _setup_test(tmp_dir, mock_get_devicelist,
- mock_read_sys_net, mock_sys_dev_path,
- dev_attrs=devices)
+ _setup_test(
+ tmp_dir,
+ mock_get_devicelist,
+ mock_read_sys_net,
+ mock_sys_dev_path,
+ dev_attrs=devices,
+ )
network_cfg = net.generate_fallback_config(config_driver=True)
- ns = network_state.parse_net_config_data(network_cfg,
- skip_broken=False)
+ ns = network_state.parse_net_config_data(
+ network_cfg, skip_broken=False
+ )
render_dir = os.path.join(tmp_dir, "render")
os.makedirs(render_dir)
# don't set rulepath so eni writes them
renderer = eni.Renderer(
- {'eni_path': 'interfaces', 'netrules_path': 'netrules'})
+ {"eni_path": "interfaces", "netrules_path": "netrules"}
+ )
renderer.render_network_state(ns, target=render_dir)
- self.assertTrue(os.path.exists(os.path.join(render_dir,
- 'interfaces')))
- with open(os.path.join(render_dir, 'interfaces')) as fh:
+ self.assertTrue(os.path.exists(os.path.join(render_dir, "interfaces")))
+ with open(os.path.join(render_dir, "interfaces")) as fh:
contents = fh.read()
print(contents)
expected = """
@@ -2793,8 +3321,8 @@ iface eth0 inet dhcp
"""
self.assertEqual(expected.lstrip(), contents.lstrip())
- self.assertTrue(os.path.exists(os.path.join(render_dir, 'netrules')))
- with open(os.path.join(render_dir, 'netrules')) as fh:
+ self.assertTrue(os.path.exists(os.path.join(render_dir, "netrules")))
+ with open(os.path.join(render_dir, "netrules")) as fh:
contents = fh.read()
print(contents)
expected_rule = [
@@ -2804,48 +3332,65 @@ iface eth0 inet dhcp
'ATTR{address}=="00:11:22:33:44:55"',
'NAME="eth0"',
]
- self.assertEqual(", ".join(expected_rule) + '\n', contents.lstrip())
+ self.assertEqual(", ".join(expected_rule) + "\n", contents.lstrip())
@mock.patch("cloudinit.net.sys_dev_path")
@mock.patch("cloudinit.net.read_sys_net")
@mock.patch("cloudinit.net.get_devicelist")
- def test_device_driver_blacklist(self, mock_get_devicelist,
- mock_read_sys_net, mock_sys_dev_path):
+ def test_device_driver_blacklist(
+ self, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path
+ ):
devices = {
- 'eth1': {
- 'bridge': False, 'carrier': False, 'dormant': False,
- 'operstate': 'down', 'address': '00:11:22:33:44:55',
- 'device/driver': 'hv_netsvc', 'device/device': '0x3',
- 'name_assign_type': '4'},
- 'eth0': {
- 'bridge': False, 'carrier': False, 'dormant': False,
- 'operstate': 'down', 'address': '00:11:22:33:44:55',
- 'device/driver': 'mlx4_core', 'device/device': '0x7',
- 'name_assign_type': '4'},
+ "eth1": {
+ "bridge": False,
+ "carrier": False,
+ "dormant": False,
+ "operstate": "down",
+ "address": "00:11:22:33:44:55",
+ "device/driver": "hv_netsvc",
+ "device/device": "0x3",
+ "name_assign_type": "4",
+ },
+ "eth0": {
+ "bridge": False,
+ "carrier": False,
+ "dormant": False,
+ "operstate": "down",
+ "address": "00:11:22:33:44:55",
+ "device/driver": "mlx4_core",
+ "device/device": "0x7",
+ "name_assign_type": "4",
+ },
}
tmp_dir = self.tmp_dir()
- _setup_test(tmp_dir, mock_get_devicelist,
- mock_read_sys_net, mock_sys_dev_path,
- dev_attrs=devices)
+ _setup_test(
+ tmp_dir,
+ mock_get_devicelist,
+ mock_read_sys_net,
+ mock_sys_dev_path,
+ dev_attrs=devices,
+ )
- blacklist = ['mlx4_core']
- network_cfg = net.generate_fallback_config(blacklist_drivers=blacklist,
- config_driver=True)
- ns = network_state.parse_net_config_data(network_cfg,
- skip_broken=False)
+ blacklist = ["mlx4_core"]
+ network_cfg = net.generate_fallback_config(
+ blacklist_drivers=blacklist, config_driver=True
+ )
+ ns = network_state.parse_net_config_data(
+ network_cfg, skip_broken=False
+ )
render_dir = os.path.join(tmp_dir, "render")
os.makedirs(render_dir)
# don't set rulepath so eni writes them
renderer = eni.Renderer(
- {'eni_path': 'interfaces', 'netrules_path': 'netrules'})
+ {"eni_path": "interfaces", "netrules_path": "netrules"}
+ )
renderer.render_network_state(ns, target=render_dir)
- self.assertTrue(os.path.exists(os.path.join(render_dir,
- 'interfaces')))
- with open(os.path.join(render_dir, 'interfaces')) as fh:
+ self.assertTrue(os.path.exists(os.path.join(render_dir, "interfaces")))
+ with open(os.path.join(render_dir, "interfaces")) as fh:
contents = fh.read()
print(contents)
expected = """
@@ -2857,8 +3402,8 @@ iface eth1 inet dhcp
"""
self.assertEqual(expected.lstrip(), contents.lstrip())
- self.assertTrue(os.path.exists(os.path.join(render_dir, 'netrules')))
- with open(os.path.join(render_dir, 'netrules')) as fh:
+ self.assertTrue(os.path.exists(os.path.join(render_dir, "netrules")))
+ with open(os.path.join(render_dir, "netrules")) as fh:
contents = fh.read()
print(contents)
expected_rule = [
@@ -2868,35 +3413,54 @@ iface eth1 inet dhcp
'ATTR{address}=="00:11:22:33:44:55"',
'NAME="eth1"',
]
- self.assertEqual(", ".join(expected_rule) + '\n', contents.lstrip())
+ self.assertEqual(", ".join(expected_rule) + "\n", contents.lstrip())
@mock.patch("cloudinit.util.get_cmdline")
@mock.patch("cloudinit.util.udevadm_settle")
@mock.patch("cloudinit.net.sys_dev_path")
@mock.patch("cloudinit.net.read_sys_net")
@mock.patch("cloudinit.net.get_devicelist")
- def test_unstable_names(self, mock_get_devicelist, mock_read_sys_net,
- mock_sys_dev_path, mock_settle, m_get_cmdline):
+ def test_unstable_names(
+ self,
+ mock_get_devicelist,
+ mock_read_sys_net,
+ mock_sys_dev_path,
+ mock_settle,
+ m_get_cmdline,
+ ):
"""verify that udevadm settle is called when we find unstable names"""
devices = {
- 'eth0': {
- 'bridge': False, 'carrier': False, 'dormant': False,
- 'operstate': 'down', 'address': '00:11:22:33:44:55',
- 'device/driver': 'hv_netsvc', 'device/device': '0x3',
- 'name_assign_type': False},
- 'ens4': {
- 'bridge': False, 'carrier': False, 'dormant': False,
- 'operstate': 'down', 'address': '00:11:22:33:44:55',
- 'device/driver': 'mlx4_core', 'device/device': '0x7',
- 'name_assign_type': '4'},
-
+ "eth0": {
+ "bridge": False,
+ "carrier": False,
+ "dormant": False,
+ "operstate": "down",
+ "address": "00:11:22:33:44:55",
+ "device/driver": "hv_netsvc",
+ "device/device": "0x3",
+ "name_assign_type": False,
+ },
+ "ens4": {
+ "bridge": False,
+ "carrier": False,
+ "dormant": False,
+ "operstate": "down",
+ "address": "00:11:22:33:44:55",
+ "device/driver": "mlx4_core",
+ "device/device": "0x7",
+ "name_assign_type": "4",
+ },
}
- m_get_cmdline.return_value = ''
+ m_get_cmdline.return_value = ""
tmp_dir = self.tmp_dir()
- _setup_test(tmp_dir, mock_get_devicelist,
- mock_read_sys_net, mock_sys_dev_path,
- dev_attrs=devices)
+ _setup_test(
+ tmp_dir,
+ mock_get_devicelist,
+ mock_read_sys_net,
+ mock_sys_dev_path,
+ dev_attrs=devices,
+ )
net.generate_fallback_config(config_driver=True)
self.assertEqual(1, mock_settle.call_count)
@@ -2905,48 +3469,73 @@ iface eth1 inet dhcp
@mock.patch("cloudinit.net.sys_dev_path")
@mock.patch("cloudinit.net.read_sys_net")
@mock.patch("cloudinit.net.get_devicelist")
- def test_unstable_names_disabled(self, mock_get_devicelist,
- mock_read_sys_net, mock_sys_dev_path,
- mock_settle, m_get_cmdline):
+ def test_unstable_names_disabled(
+ self,
+ mock_get_devicelist,
+ mock_read_sys_net,
+ mock_sys_dev_path,
+ mock_settle,
+ m_get_cmdline,
+ ):
"""verify udevadm settle not called when cmdline has net.ifnames=0"""
devices = {
- 'eth0': {
- 'bridge': False, 'carrier': False, 'dormant': False,
- 'operstate': 'down', 'address': '00:11:22:33:44:55',
- 'device/driver': 'hv_netsvc', 'device/device': '0x3',
- 'name_assign_type': False},
- 'ens4': {
- 'bridge': False, 'carrier': False, 'dormant': False,
- 'operstate': 'down', 'address': '00:11:22:33:44:55',
- 'device/driver': 'mlx4_core', 'device/device': '0x7',
- 'name_assign_type': '4'},
-
+ "eth0": {
+ "bridge": False,
+ "carrier": False,
+ "dormant": False,
+ "operstate": "down",
+ "address": "00:11:22:33:44:55",
+ "device/driver": "hv_netsvc",
+ "device/device": "0x3",
+ "name_assign_type": False,
+ },
+ "ens4": {
+ "bridge": False,
+ "carrier": False,
+ "dormant": False,
+ "operstate": "down",
+ "address": "00:11:22:33:44:55",
+ "device/driver": "mlx4_core",
+ "device/device": "0x7",
+ "name_assign_type": "4",
+ },
}
- m_get_cmdline.return_value = 'net.ifnames=0'
+ m_get_cmdline.return_value = "net.ifnames=0"
tmp_dir = self.tmp_dir()
- _setup_test(tmp_dir, mock_get_devicelist,
- mock_read_sys_net, mock_sys_dev_path,
- dev_attrs=devices)
+ _setup_test(
+ tmp_dir,
+ mock_get_devicelist,
+ mock_read_sys_net,
+ mock_sys_dev_path,
+ dev_attrs=devices,
+ )
net.generate_fallback_config(config_driver=True)
self.assertEqual(0, mock_settle.call_count)
+@mock.patch(
+ "cloudinit.net.is_openvswitch_internal_interface",
+ mock.Mock(return_value=False),
+)
class TestRhelSysConfigRendering(CiTestCase):
with_logs = True
nm_cfg_file = "/etc/NetworkManager/NetworkManager.conf"
- scripts_dir = '/etc/sysconfig/network-scripts'
- header = ('# Created by cloud-init on instance boot automatically, '
- 'do not edit.\n#\n')
+ scripts_dir = "/etc/sysconfig/network-scripts"
+ header = (
+ "# Created by cloud-init on instance boot automatically, "
+ "do not edit.\n#\n"
+ )
- expected_name = 'expected_sysconfig_rhel'
+ expected_name = "expected_sysconfig_rhel"
def _get_renderer(self):
- distro_cls = distros.fetch('rhel')
+ distro_cls = distros.fetch("rhel")
return sysconfig.Renderer(
- config=distro_cls.renderer_configs.get('sysconfig'))
+ config=distro_cls.renderer_configs.get("sysconfig")
+ )
def _render_and_read(self, network_config=None, state=None, dir=None):
if dir is None:
@@ -2964,9 +3553,8 @@ class TestRhelSysConfigRendering(CiTestCase):
return dir2dict(dir)
def _compare_files_to_expected(self, expected, found):
-
def _try_load(f):
- ''' Attempt to load shell content, otherwise return as-is '''
+ """Attempt to load shell content, otherwise return as-is"""
try:
return util.load_shell_content(f)
except ValueError:
@@ -2977,12 +3565,15 @@ class TestRhelSysConfigRendering(CiTestCase):
orig_maxdiff = self.maxDiff
expected_d = dict(
(os.path.join(self.scripts_dir, k), _try_load(v))
- for k, v in expected.items())
+ for k, v in expected.items()
+ )
# only compare the files in scripts_dir
scripts_found = dict(
- (k, _try_load(v)) for k, v in found.items()
- if k.startswith(self.scripts_dir))
+ (k, _try_load(v))
+ for k, v in found.items()
+ if k.startswith(self.scripts_dir)
+ )
try:
self.maxDiff = None
self.assertEqual(expected_d, scripts_found)
@@ -2990,9 +3581,14 @@ class TestRhelSysConfigRendering(CiTestCase):
self.maxDiff = orig_maxdiff
def _assert_headers(self, found):
- missing = [f for f in found
- if (f.startswith(self.scripts_dir) and
- not found[f].startswith(self.header))]
+ missing = [
+ f
+ for f in found
+ if (
+ f.startswith(self.scripts_dir)
+ and not found[f].startswith(self.header)
+ )
+ ]
if missing:
raise AssertionError("Missing headers in: %s" % missing)
@@ -3000,16 +3596,22 @@ class TestRhelSysConfigRendering(CiTestCase):
@mock.patch("cloudinit.net.sys_dev_path")
@mock.patch("cloudinit.net.read_sys_net")
@mock.patch("cloudinit.net.get_devicelist")
- def test_default_generation(self, mock_get_devicelist,
- mock_read_sys_net,
- mock_sys_dev_path, m_get_cmdline):
+ def test_default_generation(
+ self,
+ mock_get_devicelist,
+ mock_read_sys_net,
+ mock_sys_dev_path,
+ m_get_cmdline,
+ ):
tmp_dir = self.tmp_dir()
- _setup_test(tmp_dir, mock_get_devicelist,
- mock_read_sys_net, mock_sys_dev_path)
+ _setup_test(
+ tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path
+ )
network_cfg = net.generate_fallback_config()
- ns = network_state.parse_net_config_data(network_cfg,
- skip_broken=False)
+ ns = network_state.parse_net_config_data(
+ network_cfg, skip_broken=False
+ )
render_dir = os.path.join(tmp_dir, "render")
os.makedirs(render_dir)
@@ -3017,7 +3619,7 @@ class TestRhelSysConfigRendering(CiTestCase):
renderer = self._get_renderer()
renderer.render_network_state(ns, target=render_dir)
- render_file = 'etc/sysconfig/network-scripts/ifcfg-eth1000'
+ render_file = "etc/sysconfig/network-scripts/ifcfg-eth1000"
with open(os.path.join(render_dir, render_file)) as fh:
content = fh.read()
expected_content = """
@@ -3037,35 +3639,44 @@ USERCTL=no
"""ValueError is raised when duplicate ipv4 gateways exist."""
net_json = {
"services": [{"type": "dns", "address": "172.19.0.12"}],
- "networks": [{
- "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4",
- "type": "ipv4", "netmask": "255.255.252.0",
- "link": "tap1a81968a-79",
- "routes": [{
- "netmask": "0.0.0.0",
- "network": "0.0.0.0",
- "gateway": "172.19.3.254",
- }, {
- "netmask": "0.0.0.0", # A second default gateway
- "network": "0.0.0.0",
- "gateway": "172.20.3.254",
- }],
- "ip_address": "172.19.1.34", "id": "network0"
- }],
+ "networks": [
+ {
+ "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4",
+ "type": "ipv4",
+ "netmask": "255.255.252.0",
+ "link": "tap1a81968a-79",
+ "routes": [
+ {
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": "172.19.3.254",
+ },
+ {
+ "netmask": "0.0.0.0", # A second default gateway
+ "network": "0.0.0.0",
+ "gateway": "172.20.3.254",
+ },
+ ],
+ "ip_address": "172.19.1.34",
+ "id": "network0",
+ }
+ ],
"links": [
{
"ethernet_mac_address": "fa:16:3e:ed:9a:59",
- "mtu": None, "type": "bridge", "id":
- "tap1a81968a-79",
- "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f"
+ "mtu": None,
+ "type": "bridge",
+ "id": "tap1a81968a-79",
+ "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f",
},
],
}
- macs = {'fa:16:3e:ed:9a:59': 'eth0'}
+ macs = {"fa:16:3e:ed:9a:59": "eth0"}
render_dir = self.tmp_dir()
network_cfg = openstack.convert_net_json(net_json, known_macs=macs)
- ns = network_state.parse_net_config_data(network_cfg,
- skip_broken=False)
+ ns = network_state.parse_net_config_data(
+ network_cfg, skip_broken=False
+ )
renderer = self._get_renderer()
with self.assertRaises(ValueError):
renderer.render_network_state(ns, target=render_dir)
@@ -3075,56 +3686,138 @@ USERCTL=no
"""ValueError is raised when duplicate ipv6 gateways exist."""
net_json = {
"services": [{"type": "dns", "address": "172.19.0.12"}],
- "networks": [{
- "network_id": "public-ipv6",
- "type": "ipv6", "netmask": "",
- "link": "tap1a81968a-79",
- "routes": [{
- "gateway": "2001:DB8::1",
- "netmask": "::",
- "network": "::"
- }, {
- "gateway": "2001:DB9::1",
- "netmask": "::",
- "network": "::"
- }],
- "ip_address": "2001:DB8::10", "id": "network1"
- }],
+ "networks": [
+ {
+ "network_id": "public-ipv6",
+ "type": "ipv6",
+ "netmask": "",
+ "link": "tap1a81968a-79",
+ "routes": [
+ {
+ "gateway": "2001:DB8::1",
+ "netmask": "::",
+ "network": "::",
+ },
+ {
+ "gateway": "2001:DB9::1",
+ "netmask": "::",
+ "network": "::",
+ },
+ ],
+ "ip_address": "2001:DB8::10",
+ "id": "network1",
+ }
+ ],
"links": [
{
"ethernet_mac_address": "fa:16:3e:ed:9a:59",
- "mtu": None, "type": "bridge", "id":
- "tap1a81968a-79",
- "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f"
+ "mtu": None,
+ "type": "bridge",
+ "id": "tap1a81968a-79",
+ "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f",
},
],
}
- macs = {'fa:16:3e:ed:9a:59': 'eth0'}
+ macs = {"fa:16:3e:ed:9a:59": "eth0"}
render_dir = self.tmp_dir()
network_cfg = openstack.convert_net_json(net_json, known_macs=macs)
- ns = network_state.parse_net_config_data(network_cfg,
- skip_broken=False)
+ ns = network_state.parse_net_config_data(
+ network_cfg, skip_broken=False
+ )
renderer = self._get_renderer()
with self.assertRaises(ValueError):
renderer.render_network_state(ns, target=render_dir)
self.assertEqual([], os.listdir(render_dir))
+ def test_invalid_network_mask_ipv6(self):
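+ """ValueError is raised when a route carries an invalid ipv6 netmask."""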
+ net_json = {
+ "services": [{"type": "dns", "address": "172.19.0.12"}],
+ "networks": [
+ {
+ "network_id": "public-ipv6",
+ "type": "ipv6",
+ "netmask": "",
+ "link": "tap1a81968a-79",
+ "routes": [
+ {
+ "gateway": "2001:DB8::1",
+ "netmask": "ff:ff:ff:ff::",
+ "network": "2001:DB8:1::1",
+ },
+ ],
+ "ip_address": "2001:DB8::10",
+ "id": "network1",
+ }
+ ],
+ "links": [
+ {
+ "ethernet_mac_address": "fa:16:3e:ed:9a:59",
+ "mtu": None,
+ "type": "bridge",
+ "id": "tap1a81968a-79",
+ "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f",
+ },
+ ],
+ }
+ macs = {"fa:16:3e:ed:9a:59": "eth0"}
+ network_cfg = openstack.convert_net_json(net_json, known_macs=macs)
+ with self.assertRaises(ValueError):
+ network_state.parse_net_config_data(network_cfg, skip_broken=False)
+
+ def test_invalid_network_mask_ipv4(self):
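+ """ValueError is raised when a route carries an invalid ipv4 netmask."""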
+ net_json = {
+ "services": [{"type": "dns", "address": "172.19.0.12"}],
+ "networks": [
+ {
+ "network_id": "public-ipv4",
+ "type": "ipv4",
+ "netmask": "",
+ "link": "tap1a81968a-79",
+ "routes": [
+ {
+ "gateway": "172.20.0.1",
+ "netmask": "255.234.255.0",
+ "network": "172.19.0.0",
+ },
+ ],
+ "ip_address": "172.20.0.10",
+ "id": "network1",
+ }
+ ],
+ "links": [
+ {
+ "ethernet_mac_address": "fa:16:3e:ed:9a:59",
+ "mtu": None,
+ "type": "bridge",
+ "id": "tap1a81968a-79",
+ "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f",
+ },
+ ],
+ }
+ macs = {"fa:16:3e:ed:9a:59": "eth0"}
+ network_cfg = openstack.convert_net_json(net_json, known_macs=macs)
+ with self.assertRaises(ValueError):
+ network_state.parse_net_config_data(network_cfg, skip_broken=False)
+
def test_openstack_rendering_samples(self):
for os_sample in OS_SAMPLES:
render_dir = self.tmp_dir()
- ex_input = os_sample['in_data']
- ex_mac_addrs = os_sample['in_macs']
+ ex_input = os_sample["in_data"]
+ ex_mac_addrs = os_sample["in_macs"]
network_cfg = openstack.convert_net_json(
- ex_input, known_macs=ex_mac_addrs)
- ns = network_state.parse_net_config_data(network_cfg,
- skip_broken=False)
+ ex_input, known_macs=ex_mac_addrs
+ )
+ ns = network_state.parse_net_config_data(
+ network_cfg, skip_broken=False
+ )
renderer = self._get_renderer()
# render multiple times to simulate reboots
renderer.render_network_state(ns, target=render_dir)
renderer.render_network_state(ns, target=render_dir)
renderer.render_network_state(ns, target=render_dir)
- for fn, expected_content in os_sample.get('out_sysconfig_rhel',
- []):
+ for fn, expected_content in os_sample.get(
+ "out_sysconfig_rhel", []
+ ):
with open(os.path.join(render_dir, fn)) as fh:
self.assertEqual(expected_content, fh.read())
@@ -3135,8 +3828,8 @@ USERCTL=no
renderer = self._get_renderer()
renderer.render_network_state(ns, target=render_dir)
found = dir2dict(render_dir)
- nspath = '/etc/sysconfig/network-scripts/'
- self.assertNotIn(nspath + 'ifcfg-lo', found.keys())
+ nspath = "/etc/sysconfig/network-scripts/"
+ self.assertNotIn(nspath + "ifcfg-lo", found.keys())
expected = """\
# Created by cloud-init on instance boot automatically, do not edit.
#
@@ -3152,10 +3845,10 @@ ONBOOT=yes
TYPE=Ethernet
USERCTL=no
"""
- self.assertEqual(expected, found[nspath + 'ifcfg-interface0'])
+ self.assertEqual(expected, found[nspath + "ifcfg-interface0"])
# The configuration has no nameserver information, so make sure
# we do not write the resolv.conf file
- respath = '/etc/resolv.conf'
+ respath = "/etc/resolv.conf"
self.assertNotIn(respath, found.keys())
def test_network_config_v1_multi_iface_samples(self):
@@ -3165,8 +3858,8 @@ USERCTL=no
renderer = self._get_renderer()
renderer.render_network_state(ns, target=render_dir)
found = dir2dict(render_dir)
- nspath = '/etc/sysconfig/network-scripts/'
- self.assertNotIn(nspath + 'ifcfg-lo', found.keys())
+ nspath = "/etc/sysconfig/network-scripts/"
+ self.assertNotIn(nspath + "ifcfg-lo", found.keys())
expected_i1 = """\
# Created by cloud-init on instance boot automatically, do not edit.
#
@@ -3183,7 +3876,7 @@ ONBOOT=yes
TYPE=Ethernet
USERCTL=no
"""
- self.assertEqual(expected_i1, found[nspath + 'ifcfg-eth0'])
+ self.assertEqual(expected_i1, found[nspath + "ifcfg-eth0"])
expected_i2 = """\
# Created by cloud-init on instance boot automatically, do not edit.
#
@@ -3197,21 +3890,21 @@ ONBOOT=yes
TYPE=Ethernet
USERCTL=no
"""
- self.assertEqual(expected_i2, found[nspath + 'ifcfg-eth1'])
+ self.assertEqual(expected_i2, found[nspath + "ifcfg-eth1"])
def test_config_with_explicit_loopback(self):
ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK)
render_dir = self.tmp_path("render")
os.makedirs(render_dir)
# write an etc/resolv.conf and expect it to not be modified
- resolvconf = os.path.join(render_dir, 'etc/resolv.conf')
+ resolvconf = os.path.join(render_dir, "etc/resolv.conf")
resolvconf_content = "# Original Content"
util.write_file(resolvconf, resolvconf_content)
renderer = self._get_renderer()
renderer.render_network_state(ns, target=render_dir)
found = dir2dict(render_dir)
- nspath = '/etc/sysconfig/network-scripts/'
- self.assertNotIn(nspath + 'ifcfg-lo', found.keys())
+ nspath = "/etc/sysconfig/network-scripts/"
+ self.assertNotIn(nspath + "ifcfg-lo", found.keys())
expected = """\
# Created by cloud-init on instance boot automatically, do not edit.
#
@@ -3222,171 +3915,188 @@ ONBOOT=yes
TYPE=Ethernet
USERCTL=no
"""
- self.assertEqual(expected, found[nspath + 'ifcfg-eth0'])
+ self.assertEqual(expected, found[nspath + "ifcfg-eth0"])
# a dhcp only config should not modify resolv.conf
- self.assertEqual(resolvconf_content, found['/etc/resolv.conf'])
+ self.assertEqual(resolvconf_content, found["/etc/resolv.conf"])
def test_bond_config(self):
- entry = NETWORK_CONFIGS['bond']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["bond"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_vlan_config(self):
- entry = NETWORK_CONFIGS['vlan']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["vlan"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_bridge_config(self):
- entry = NETWORK_CONFIGS['bridge']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["bridge"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_manual_config(self):
- entry = NETWORK_CONFIGS['manual']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["manual"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_all_config(self):
- entry = NETWORK_CONFIGS['all']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["all"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
self.assertNotIn(
- 'WARNING: Network config: ignoring eth0.101 device-level mtu',
- self.logs.getvalue())
+ "WARNING: Network config: ignoring eth0.101 device-level mtu",
+ self.logs.getvalue(),
+ )
def test_small_config(self):
- entry = NETWORK_CONFIGS['small']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["small"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_v4_and_v6_static_config(self):
- entry = NETWORK_CONFIGS['v4_and_v6_static']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["v4_and_v6_static"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
expected_msg = (
- 'WARNING: Network config: ignoring iface0 device-level mtu:8999'
- ' because ipv4 subnet-level mtu:9000 provided.')
+ "WARNING: Network config: ignoring iface0 device-level mtu:8999"
+ " because ipv4 subnet-level mtu:9000 provided."
+ )
self.assertIn(expected_msg, self.logs.getvalue())
def test_dhcpv6_only_config(self):
- entry = NETWORK_CONFIGS['dhcpv6_only']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["dhcpv6_only"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_dhcpv6_accept_ra_config_v1(self):
- entry = NETWORK_CONFIGS['dhcpv6_accept_ra']
- found = self._render_and_read(network_config=yaml.load(
- entry['yaml_v1']))
+ entry = NETWORK_CONFIGS["dhcpv6_accept_ra"]
+ found = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v1"])
+ )
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_dhcpv6_accept_ra_config_v2(self):
- entry = NETWORK_CONFIGS['dhcpv6_accept_ra']
- found = self._render_and_read(network_config=yaml.load(
- entry['yaml_v2']))
+ entry = NETWORK_CONFIGS["dhcpv6_accept_ra"]
+ found = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v2"])
+ )
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_dhcpv6_reject_ra_config_v1(self):
- entry = NETWORK_CONFIGS['dhcpv6_reject_ra']
- found = self._render_and_read(network_config=yaml.load(
- entry['yaml_v1']))
+ entry = NETWORK_CONFIGS["dhcpv6_reject_ra"]
+ found = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v1"])
+ )
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_stattic6_from_json(self):
net_json = {
"services": [{"type": "dns", "address": "172.19.0.12"}],
- "networks": [{
- "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4",
- "type": "ipv4", "netmask": "255.255.252.0",
- "link": "tap1a81968a-79",
- "routes": [{
- "netmask": "0.0.0.0",
- "network": "0.0.0.0",
- "gateway": "172.19.3.254",
- }, {
- "netmask": "0.0.0.0", # A second default gateway
- "network": "0.0.0.0",
- "gateway": "172.20.3.254",
- }],
- "ip_address": "172.19.1.34", "id": "network0"
- }, {
- "network_id": "mgmt",
- "netmask": "ffff:ffff:ffff:ffff::",
- "link": "interface1",
- "mode": "link-local",
- "routes": [],
- "ip_address": "fe80::c096:67ff:fe5c:6e84",
- "type": "static6",
- "id": "network1",
- "services": [],
- "accept-ra": "false"
- }],
+ "networks": [
+ {
+ "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4",
+ "type": "ipv4",
+ "netmask": "255.255.252.0",
+ "link": "tap1a81968a-79",
+ "routes": [
+ {
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": "172.19.3.254",
+ },
+ {
+ "netmask": "0.0.0.0", # A second default gateway
+ "network": "0.0.0.0",
+ "gateway": "172.20.3.254",
+ },
+ ],
+ "ip_address": "172.19.1.34",
+ "id": "network0",
+ },
+ {
+ "network_id": "mgmt",
+ "netmask": "ffff:ffff:ffff:ffff::",
+ "link": "interface1",
+ "mode": "link-local",
+ "routes": [],
+ "ip_address": "fe80::c096:67ff:fe5c:6e84",
+ "type": "static6",
+ "id": "network1",
+ "services": [],
+ "accept-ra": "false",
+ },
+ ],
"links": [
{
"ethernet_mac_address": "fa:16:3e:ed:9a:59",
- "mtu": None, "type": "bridge", "id":
- "tap1a81968a-79",
- "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f"
+ "mtu": None,
+ "type": "bridge",
+ "id": "tap1a81968a-79",
+ "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f",
},
],
}
- macs = {'fa:16:3e:ed:9a:59': 'eth0'}
+ macs = {"fa:16:3e:ed:9a:59": "eth0"}
render_dir = self.tmp_dir()
network_cfg = openstack.convert_net_json(net_json, known_macs=macs)
- ns = network_state.parse_net_config_data(network_cfg,
- skip_broken=False)
+ ns = network_state.parse_net_config_data(
+ network_cfg, skip_broken=False
+ )
renderer = self._get_renderer()
with self.assertRaises(ValueError):
renderer.render_network_state(ns, target=render_dir)
self.assertEqual([], os.listdir(render_dir))
def test_static6_from_yaml(self):
- entry = NETWORK_CONFIGS['static6']
- found = self._render_and_read(network_config=yaml.load(
- entry['yaml']))
+ entry = NETWORK_CONFIGS["static6"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_dhcpv6_reject_ra_config_v2(self):
- entry = NETWORK_CONFIGS['dhcpv6_reject_ra']
- found = self._render_and_read(network_config=yaml.load(
- entry['yaml_v2']))
+ entry = NETWORK_CONFIGS["dhcpv6_reject_ra"]
+ found = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v2"])
+ )
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_dhcpv6_stateless_config(self):
- entry = NETWORK_CONFIGS['dhcpv6_stateless']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["dhcpv6_stateless"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_dhcpv6_stateful_config(self):
- entry = NETWORK_CONFIGS['dhcpv6_stateful']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["dhcpv6_stateful"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_wakeonlan_disabled_config_v2(self):
- entry = NETWORK_CONFIGS['wakeonlan_disabled']
- found = self._render_and_read(network_config=yaml.load(
- entry['yaml_v2']))
+ entry = NETWORK_CONFIGS["wakeonlan_disabled"]
+ found = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v2"])
+ )
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_wakeonlan_enabled_config_v2(self):
- entry = NETWORK_CONFIGS['wakeonlan_enabled']
- found = self._render_and_read(network_config=yaml.load(
- entry['yaml_v2']))
+ entry = NETWORK_CONFIGS["wakeonlan_enabled"]
+ found = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v2"])
+ )
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
@@ -3397,20 +4107,21 @@ USERCTL=no
util.ensure_dir(os.path.dirname(nm_cfg))
# write a template nm.conf, note plugins is a list here
- with open(nm_cfg, 'w') as fh:
- fh.write('# test_check_ifcfg_rh\n[main]\nplugins=foo,bar\n')
+ with open(nm_cfg, "w") as fh:
+ fh.write("# test_check_ifcfg_rh\n[main]\nplugins=foo,bar\n")
self.assertTrue(os.path.exists(nm_cfg))
# render and read
- entry = NETWORK_CONFIGS['small']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']),
- dir=render_dir)
+ entry = NETWORK_CONFIGS["small"]
+ found = self._render_and_read(
+ network_config=yaml.load(entry["yaml"]), dir=render_dir
+ )
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
# check ifcfg-rh is in the 'plugins' list
config = sysconfig.ConfigObj(nm_cfg)
- self.assertIn('ifcfg-rh', config['main']['plugins'])
+ self.assertIn("ifcfg-rh", config["main"]["plugins"])
def test_check_ifcfg_rh_plugins_string(self):
"""ifcfg-rh plugin is appended when plugins is a string."""
@@ -3420,22 +4131,23 @@ USERCTL=no
util.ensure_dir(os.path.dirname(nm_cfg))
# write a template nm.conf, note plugins is a value here
- util.write_file(nm_cfg, '# test_check_ifcfg_rh\n[main]\nplugins=foo\n')
+ util.write_file(nm_cfg, "# test_check_ifcfg_rh\n[main]\nplugins=foo\n")
# render and read
- entry = NETWORK_CONFIGS['small']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']),
- dir=render_dir)
+ entry = NETWORK_CONFIGS["small"]
+ found = self._render_and_read(
+ network_config=yaml.load(entry["yaml"]), dir=render_dir
+ )
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
# check raw content has plugin
nm_file_content = util.load_file(nm_cfg)
- self.assertIn('ifcfg-rh', nm_file_content)
+ self.assertIn("ifcfg-rh", nm_file_content)
# check ifcfg-rh is in the 'plugins' list
config = sysconfig.ConfigObj(nm_cfg)
- self.assertIn('ifcfg-rh', config['main']['plugins'])
+ self.assertIn("ifcfg-rh", config["main"]["plugins"])
def test_check_ifcfg_rh_plugins_no_plugins(self):
"""enable_ifcfg_plugin creates plugins value if missing."""
@@ -3445,28 +4157,32 @@ USERCTL=no
util.ensure_dir(os.path.dirname(nm_cfg))
# write a template nm.conf, note plugins is missing
- util.write_file(nm_cfg, '# test_check_ifcfg_rh\n[main]\n')
+ util.write_file(nm_cfg, "# test_check_ifcfg_rh\n[main]\n")
self.assertTrue(os.path.exists(nm_cfg))
# render and read
- entry = NETWORK_CONFIGS['small']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']),
- dir=render_dir)
+ entry = NETWORK_CONFIGS["small"]
+ found = self._render_and_read(
+ network_config=yaml.load(entry["yaml"]), dir=render_dir
+ )
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
# check ifcfg-rh is in the 'plugins' list
config = sysconfig.ConfigObj(nm_cfg)
- self.assertIn('ifcfg-rh', config['main']['plugins'])
+ self.assertIn("ifcfg-rh", config["main"]["plugins"])
def test_netplan_dhcp_false_disable_dhcp_in_state(self):
"""netplan config with dhcp[46]: False should not add dhcp in state"""
net_config = yaml.load(NETPLAN_DHCP_FALSE)
- ns = network_state.parse_net_config_data(net_config,
- skip_broken=False)
+ ns = network_state.parse_net_config_data(net_config, skip_broken=False)
- dhcp_found = [snet for iface in ns.iter_interfaces()
- for snet in iface['subnets'] if 'dhcp' in snet['type']]
+ dhcp_found = [
+ snet
+ for iface in ns.iter_interfaces()
+ for snet in iface["subnets"]
+ if "dhcp" in snet["type"]
+ ]
self.assertEqual([], dhcp_found)
@@ -3474,9 +4190,10 @@ USERCTL=no
"""netplan cfg with dhcp[46]: False should not have bootproto=dhcp"""
entry = {
- 'yaml': NETPLAN_DHCP_FALSE,
- 'expected_sysconfig': {
- 'ifcfg-ens3': textwrap.dedent("""\
+ "yaml": NETPLAN_DHCP_FALSE,
+ "expected_sysconfig": {
+ "ifcfg-ens3": textwrap.dedent(
+ """\
BOOTPROTO=none
DEFROUTE=yes
DEVICE=ens3
@@ -3496,33 +4213,42 @@ USERCTL=no
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
- }
+ """
+ ),
+ },
}
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
- self._compare_files_to_expected(entry['expected_sysconfig'], found)
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
+ self._compare_files_to_expected(entry["expected_sysconfig"], found)
self._assert_headers(found)
def test_from_v2_vlan_mtu(self):
"""verify mtu gets rendered on bond when source is netplan."""
v2data = {
- 'version': 2,
- 'ethernets': {'eno1': {}},
- 'vlans': {
- 'eno1.1000': {
- 'addresses': ["192.6.1.9/24"],
- 'id': 1000, 'link': 'eno1', 'mtu': 1495}}}
+ "version": 2,
+ "ethernets": {"eno1": {}},
+ "vlans": {
+ "eno1.1000": {
+ "addresses": ["192.6.1.9/24"],
+ "id": 1000,
+ "link": "eno1",
+ "mtu": 1495,
+ }
+ },
+ }
expected = {
- 'ifcfg-eno1': textwrap.dedent("""\
+ "ifcfg-eno1": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=eno1
NM_CONTROLLED=no
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
- 'ifcfg-eno1.1000': textwrap.dedent("""\
+ """
+ ),
+ "ifcfg-eno1.1000": textwrap.dedent(
+ """\
BOOTPROTO=none
DEVICE=eno1.1000
IPADDR=192.6.1.9
@@ -3533,23 +4259,29 @@ USERCTL=no
PHYSDEV=eno1
USERCTL=no
VLAN=yes
- """)
+ """
+ ),
}
self._compare_files_to_expected(
- expected, self._render_and_read(network_config=v2data))
+ expected, self._render_and_read(network_config=v2data)
+ )
def test_from_v2_bond_mtu(self):
"""verify mtu gets rendered on bond when source is netplan."""
v2data = {
- 'version': 2,
- 'bonds': {
- 'bond0': {'addresses': ['10.101.8.65/26'],
- 'interfaces': ['enp0s0', 'enp0s1'],
- 'mtu': 1334,
- 'parameters': {}}}
+ "version": 2,
+ "bonds": {
+ "bond0": {
+ "addresses": ["10.101.8.65/26"],
+ "interfaces": ["enp0s0", "enp0s1"],
+ "mtu": 1334,
+ "parameters": {},
+ }
+ },
}
expected = {
- 'ifcfg-bond0': textwrap.dedent("""\
+ "ifcfg-bond0": textwrap.dedent(
+ """\
BONDING_MASTER=yes
BONDING_SLAVE0=enp0s0
BONDING_SLAVE1=enp0s1
@@ -3562,8 +4294,10 @@ USERCTL=no
ONBOOT=yes
TYPE=Bond
USERCTL=no
- """),
- 'ifcfg-enp0s0': textwrap.dedent("""\
+ """
+ ),
+ "ifcfg-enp0s0": textwrap.dedent(
+ """\
BONDING_MASTER=yes
BOOTPROTO=none
DEVICE=enp0s0
@@ -3573,8 +4307,10 @@ USERCTL=no
SLAVE=yes
TYPE=Bond
USERCTL=no
- """),
- 'ifcfg-enp0s1': textwrap.dedent("""\
+ """
+ ),
+ "ifcfg-enp0s1": textwrap.dedent(
+ """\
BONDING_MASTER=yes
BOOTPROTO=none
DEVICE=enp0s1
@@ -3584,21 +4320,28 @@ USERCTL=no
SLAVE=yes
TYPE=Bond
USERCTL=no
- """)
+ """
+ ),
}
self._compare_files_to_expected(
- expected, self._render_and_read(network_config=v2data))
+ expected, self._render_and_read(network_config=v2data)
+ )
def test_from_v2_route_metric(self):
"""verify route-metric gets rendered on nic when source is netplan."""
- overrides = {'route-metric': 100}
+ overrides = {"route-metric": 100}
v2base = {
- 'version': 2,
- 'ethernets': {
- 'eno1': {'dhcp4': True,
- 'match': {'macaddress': '07-1c-c6-75-a4-be'}}}}
+ "version": 2,
+ "ethernets": {
+ "eno1": {
+ "dhcp4": True,
+ "match": {"macaddress": "07-1c-c6-75-a4-be"},
+ }
+ },
+ }
expected = {
- 'ifcfg-eno1': textwrap.dedent("""\
+ "ifcfg-eno1": textwrap.dedent(
+ """\
BOOTPROTO=dhcp
DEVICE=eno1
HWADDR=07-1c-c6-75-a4-be
@@ -3607,32 +4350,42 @@ USERCTL=no
ONBOOT=yes
TYPE=Ethernet
USERCTL=no
- """),
+ """
+ ),
}
- for dhcp_ver in ('dhcp4', 'dhcp6'):
+ for dhcp_ver in ("dhcp4", "dhcp6"):
v2data = copy.deepcopy(v2base)
- if dhcp_ver == 'dhcp6':
- expected['ifcfg-eno1'] += "IPV6INIT=yes\nDHCPV6C=yes\n"
- v2data['ethernets']['eno1'].update(
- {dhcp_ver: True, '{0}-overrides'.format(dhcp_ver): overrides})
+ if dhcp_ver == "dhcp6":
+ expected["ifcfg-eno1"] += "IPV6INIT=yes\nDHCPV6C=yes\n"
+ v2data["ethernets"]["eno1"].update(
+ {dhcp_ver: True, "{0}-overrides".format(dhcp_ver): overrides}
+ )
self._compare_files_to_expected(
- expected, self._render_and_read(network_config=v2data))
+ expected, self._render_and_read(network_config=v2data)
+ )
+@mock.patch(
+ "cloudinit.net.is_openvswitch_internal_interface",
+ mock.Mock(return_value=False),
+)
class TestOpenSuseSysConfigRendering(CiTestCase):
with_logs = True
- scripts_dir = '/etc/sysconfig/network'
- header = ('# Created by cloud-init on instance boot automatically, '
- 'do not edit.\n#\n')
+ scripts_dir = "/etc/sysconfig/network"
+ header = (
+ "# Created by cloud-init on instance boot automatically, "
+ "do not edit.\n#\n"
+ )
- expected_name = 'expected_sysconfig_opensuse'
+ expected_name = "expected_sysconfig_opensuse"
def _get_renderer(self):
- distro_cls = distros.fetch('opensuse')
+ distro_cls = distros.fetch("opensuse")
return sysconfig.Renderer(
- config=distro_cls.renderer_configs.get('sysconfig'))
+ config=distro_cls.renderer_configs.get("sysconfig")
+ )
def _render_and_read(self, network_config=None, state=None, dir=None):
if dir is None:
@@ -3653,12 +4406,15 @@ class TestOpenSuseSysConfigRendering(CiTestCase):
orig_maxdiff = self.maxDiff
expected_d = dict(
(os.path.join(self.scripts_dir, k), util.load_shell_content(v))
- for k, v in expected.items())
+ for k, v in expected.items()
+ )
# only compare the files in scripts_dir
scripts_found = dict(
- (k, util.load_shell_content(v)) for k, v in found.items()
- if k.startswith(self.scripts_dir))
+ (k, util.load_shell_content(v))
+ for k, v in found.items()
+ if k.startswith(self.scripts_dir)
+ )
try:
self.maxDiff = None
self.assertEqual(expected_d, scripts_found)
@@ -3666,9 +4422,14 @@ class TestOpenSuseSysConfigRendering(CiTestCase):
self.maxDiff = orig_maxdiff
def _assert_headers(self, found):
- missing = [f for f in found
- if (f.startswith(self.scripts_dir) and
- not found[f].startswith(self.header))]
+ missing = [
+ f
+ for f in found
+ if (
+ f.startswith(self.scripts_dir)
+ and not found[f].startswith(self.header)
+ )
+ ]
if missing:
raise AssertionError("Missing headers in: %s" % missing)
@@ -3676,16 +4437,22 @@ class TestOpenSuseSysConfigRendering(CiTestCase):
@mock.patch("cloudinit.net.sys_dev_path")
@mock.patch("cloudinit.net.read_sys_net")
@mock.patch("cloudinit.net.get_devicelist")
- def test_default_generation(self, mock_get_devicelist,
- mock_read_sys_net,
- mock_sys_dev_path, m_get_cmdline):
+ def test_default_generation(
+ self,
+ mock_get_devicelist,
+ mock_read_sys_net,
+ mock_sys_dev_path,
+ m_get_cmdline,
+ ):
tmp_dir = self.tmp_dir()
- _setup_test(tmp_dir, mock_get_devicelist,
- mock_read_sys_net, mock_sys_dev_path)
+ _setup_test(
+ tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path
+ )
network_cfg = net.generate_fallback_config()
- ns = network_state.parse_net_config_data(network_cfg,
- skip_broken=False)
+ ns = network_state.parse_net_config_data(
+ network_cfg, skip_broken=False
+ )
render_dir = os.path.join(tmp_dir, "render")
os.makedirs(render_dir)
@@ -3693,7 +4460,7 @@ class TestOpenSuseSysConfigRendering(CiTestCase):
renderer = self._get_renderer()
renderer.render_network_state(ns, target=render_dir)
- render_file = 'etc/sysconfig/network/ifcfg-eth1000'
+ render_file = "etc/sysconfig/network/ifcfg-eth1000"
with open(os.path.join(render_dir, render_file)) as fh:
content = fh.read()
expected_content = """
@@ -3707,98 +4474,101 @@ STARTMODE=auto
# TODO(rjschwei): re-enable test once route writing is implemented
# for SUSE distros
-# def test_multiple_ipv4_default_gateways(self):
-# """ValueError is raised when duplicate ipv4 gateways exist."""
-# net_json = {
-# "services": [{"type": "dns", "address": "172.19.0.12"}],
-# "networks": [{
-# "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4",
-# "type": "ipv4", "netmask": "255.255.252.0",
-# "link": "tap1a81968a-79",
-# "routes": [{
-# "netmask": "0.0.0.0",
-# "network": "0.0.0.0",
-# "gateway": "172.19.3.254",
-# }, {
-# "netmask": "0.0.0.0", # A second default gateway
-# "network": "0.0.0.0",
-# "gateway": "172.20.3.254",
-# }],
-# "ip_address": "172.19.1.34", "id": "network0"
-# }],
-# "links": [
-# {
-# "ethernet_mac_address": "fa:16:3e:ed:9a:59",
-# "mtu": None, "type": "bridge", "id":
-# "tap1a81968a-79",
-# "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f"
-# },
-# ],
-# }
-# macs = {'fa:16:3e:ed:9a:59': 'eth0'}
-# render_dir = self.tmp_dir()
-# network_cfg = openstack.convert_net_json(net_json, known_macs=macs)
-# ns = network_state.parse_net_config_data(network_cfg,
-# skip_broken=False)
-# renderer = self._get_renderer()
-# with self.assertRaises(ValueError):
-# renderer.render_network_state(ns, target=render_dir)
-# self.assertEqual([], os.listdir(render_dir))
-#
-# def test_multiple_ipv6_default_gateways(self):
-# """ValueError is raised when duplicate ipv6 gateways exist."""
-# net_json = {
-# "services": [{"type": "dns", "address": "172.19.0.12"}],
-# "networks": [{
-# "network_id": "public-ipv6",
-# "type": "ipv6", "netmask": "",
-# "link": "tap1a81968a-79",
-# "routes": [{
-# "gateway": "2001:DB8::1",
-# "netmask": "::",
-# "network": "::"
-# }, {
-# "gateway": "2001:DB9::1",
-# "netmask": "::",
-# "network": "::"
-# }],
-# "ip_address": "2001:DB8::10", "id": "network1"
-# }],
-# "links": [
-# {
-# "ethernet_mac_address": "fa:16:3e:ed:9a:59",
-# "mtu": None, "type": "bridge", "id":
-# "tap1a81968a-79",
-# "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f"
-# },
-# ],
-# }
-# macs = {'fa:16:3e:ed:9a:59': 'eth0'}
-# render_dir = self.tmp_dir()
-# network_cfg = openstack.convert_net_json(net_json, known_macs=macs)
-# ns = network_state.parse_net_config_data(network_cfg,
-# skip_broken=False)
-# renderer = self._get_renderer()
-# with self.assertRaises(ValueError):
-# renderer.render_network_state(ns, target=render_dir)
-# self.assertEqual([], os.listdir(render_dir))
+ # def test_multiple_ipv4_default_gateways(self):
+ # """ValueError is raised when duplicate ipv4 gateways exist."""
+ # net_json = {
+ # "services": [{"type": "dns", "address": "172.19.0.12"}],
+ # "networks": [{
+ # "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4",
+ # "type": "ipv4", "netmask": "255.255.252.0",
+ # "link": "tap1a81968a-79",
+ # "routes": [{
+ # "netmask": "0.0.0.0",
+ # "network": "0.0.0.0",
+ # "gateway": "172.19.3.254",
+ # }, {
+ # "netmask": "0.0.0.0", # A second default gateway
+ # "network": "0.0.0.0",
+ # "gateway": "172.20.3.254",
+ # }],
+ # "ip_address": "172.19.1.34", "id": "network0"
+ # }],
+ # "links": [
+ # {
+ # "ethernet_mac_address": "fa:16:3e:ed:9a:59",
+ # "mtu": None, "type": "bridge", "id":
+ # "tap1a81968a-79",
+ # "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f"
+ # },
+ # ],
+ # }
+ # macs = {'fa:16:3e:ed:9a:59': 'eth0'}
+ # render_dir = self.tmp_dir()
+ # network_cfg = openstack.convert_net_json(net_json, known_macs=macs) # noqa: E501
+ # ns = network_state.parse_net_config_data(network_cfg,
+ # skip_broken=False)
+ # renderer = self._get_renderer()
+ # with self.assertRaises(ValueError):
+ # renderer.render_network_state(ns, target=render_dir)
+ # self.assertEqual([], os.listdir(render_dir))
+ #
+ # def test_multiple_ipv6_default_gateways(self):
+ # """ValueError is raised when duplicate ipv6 gateways exist."""
+ # net_json = {
+ # "services": [{"type": "dns", "address": "172.19.0.12"}],
+ # "networks": [{
+ # "network_id": "public-ipv6",
+ # "type": "ipv6", "netmask": "",
+ # "link": "tap1a81968a-79",
+ # "routes": [{
+ # "gateway": "2001:DB8::1",
+ # "netmask": "::",
+ # "network": "::"
+ # }, {
+ # "gateway": "2001:DB9::1",
+ # "netmask": "::",
+ # "network": "::"
+ # }],
+ # "ip_address": "2001:DB8::10", "id": "network1"
+ # }],
+ # "links": [
+ # {
+ # "ethernet_mac_address": "fa:16:3e:ed:9a:59",
+ # "mtu": None, "type": "bridge", "id":
+ # "tap1a81968a-79",
+ # "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f"
+ # },
+ # ],
+ # }
+ # macs = {'fa:16:3e:ed:9a:59': 'eth0'}
+ # render_dir = self.tmp_dir()
+ # network_cfg = openstack.convert_net_json(net_json, known_macs=macs) # noqa: E501
+ # ns = network_state.parse_net_config_data(network_cfg,
+ # skip_broken=False)
+ # renderer = self._get_renderer()
+ # with self.assertRaises(ValueError):
+ # renderer.render_network_state(ns, target=render_dir)
+ # self.assertEqual([], os.listdir(render_dir))
def test_openstack_rendering_samples(self):
for os_sample in OS_SAMPLES:
render_dir = self.tmp_dir()
- ex_input = os_sample['in_data']
- ex_mac_addrs = os_sample['in_macs']
+ ex_input = os_sample["in_data"]
+ ex_mac_addrs = os_sample["in_macs"]
network_cfg = openstack.convert_net_json(
- ex_input, known_macs=ex_mac_addrs)
- ns = network_state.parse_net_config_data(network_cfg,
- skip_broken=False)
+ ex_input, known_macs=ex_mac_addrs
+ )
+ ns = network_state.parse_net_config_data(
+ network_cfg, skip_broken=False
+ )
renderer = self._get_renderer()
# render multiple times to simulate reboots
renderer.render_network_state(ns, target=render_dir)
renderer.render_network_state(ns, target=render_dir)
renderer.render_network_state(ns, target=render_dir)
- for fn, expected_content in os_sample.get('out_sysconfig_opensuse',
- []):
+ for fn, expected_content in os_sample.get(
+ "out_sysconfig_opensuse", []
+ ):
with open(os.path.join(render_dir, fn)) as fh:
self.assertEqual(expected_content, fh.read())
@@ -3809,8 +4579,8 @@ STARTMODE=auto
renderer = self._get_renderer()
renderer.render_network_state(ns, target=render_dir)
found = dir2dict(render_dir)
- nspath = '/etc/sysconfig/network/'
- self.assertNotIn(nspath + 'ifcfg-lo', found.keys())
+ nspath = "/etc/sysconfig/network/"
+ self.assertNotIn(nspath + "ifcfg-lo", found.keys())
expected = """\
# Created by cloud-init on instance boot automatically, do not edit.
#
@@ -3820,10 +4590,10 @@ LLADDR=52:54:00:12:34:00
NETMASK=255.255.255.0
STARTMODE=auto
"""
- self.assertEqual(expected, found[nspath + 'ifcfg-interface0'])
+ self.assertEqual(expected, found[nspath + "ifcfg-interface0"])
# The configuration has no nameserver information, so make sure
# we do not write the resolv.conf file
- respath = '/etc/resolv.conf'
+ respath = "/etc/resolv.conf"
self.assertNotIn(respath, found.keys())
def test_config_with_explicit_loopback(self):
@@ -3831,33 +4601,33 @@ STARTMODE=auto
render_dir = self.tmp_path("render")
os.makedirs(render_dir)
# write an etc/resolv.conf and expect it to not be modified
- resolvconf = os.path.join(render_dir, 'etc/resolv.conf')
+ resolvconf = os.path.join(render_dir, "etc/resolv.conf")
resolvconf_content = "# Original Content"
util.write_file(resolvconf, resolvconf_content)
renderer = self._get_renderer()
renderer.render_network_state(ns, target=render_dir)
found = dir2dict(render_dir)
- nspath = '/etc/sysconfig/network/'
- self.assertNotIn(nspath + 'ifcfg-lo', found.keys())
+ nspath = "/etc/sysconfig/network/"
+ self.assertNotIn(nspath + "ifcfg-lo", found.keys())
expected = """\
# Created by cloud-init on instance boot automatically, do not edit.
#
BOOTPROTO=dhcp
STARTMODE=auto
"""
- self.assertEqual(expected, found[nspath + 'ifcfg-eth0'])
+ self.assertEqual(expected, found[nspath + "ifcfg-eth0"])
# a dhcp only config should not modify resolv.conf
- self.assertEqual(resolvconf_content, found['/etc/resolv.conf'])
+ self.assertEqual(resolvconf_content, found["/etc/resolv.conf"])
def test_bond_config(self):
- expected_name = 'expected_sysconfig_opensuse'
- entry = NETWORK_CONFIGS['bond']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ expected_name = "expected_sysconfig_opensuse"
+ entry = NETWORK_CONFIGS["bond"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
for fname, contents in entry[expected_name].items():
print(fname)
print(contents)
print()
- print('-- expected ^ | v rendered --')
+ print("-- expected ^ | v rendered --")
for fname, contents in found.items():
print(fname)
print(contents)
@@ -3866,120 +4636,129 @@ STARTMODE=auto
self._assert_headers(found)
def test_vlan_config(self):
- entry = NETWORK_CONFIGS['vlan']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["vlan"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_bridge_config(self):
- entry = NETWORK_CONFIGS['bridge']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["bridge"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_manual_config(self):
- entry = NETWORK_CONFIGS['manual']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["manual"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_all_config(self):
- entry = NETWORK_CONFIGS['all']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["all"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
self.assertNotIn(
- 'WARNING: Network config: ignoring eth0.101 device-level mtu',
- self.logs.getvalue())
+ "WARNING: Network config: ignoring eth0.101 device-level mtu",
+ self.logs.getvalue(),
+ )
def test_small_config(self):
- entry = NETWORK_CONFIGS['small']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["small"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_v4_and_v6_static_config(self):
- entry = NETWORK_CONFIGS['v4_and_v6_static']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["v4_and_v6_static"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
expected_msg = (
- 'WARNING: Network config: ignoring iface0 device-level mtu:8999'
- ' because ipv4 subnet-level mtu:9000 provided.')
+ "WARNING: Network config: ignoring iface0 device-level mtu:8999"
+ " because ipv4 subnet-level mtu:9000 provided."
+ )
self.assertIn(expected_msg, self.logs.getvalue())
def test_dhcpv6_only_config(self):
- entry = NETWORK_CONFIGS['dhcpv6_only']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["dhcpv6_only"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_simple_render_ipv6_slaac(self):
- entry = NETWORK_CONFIGS['ipv6_slaac']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["ipv6_slaac"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_dhcpv6_stateless_config(self):
- entry = NETWORK_CONFIGS['dhcpv6_stateless']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["dhcpv6_stateless"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_wakeonlan_disabled_config_v2(self):
- entry = NETWORK_CONFIGS['wakeonlan_disabled']
- found = self._render_and_read(network_config=yaml.load(
- entry['yaml_v2']))
+ entry = NETWORK_CONFIGS["wakeonlan_disabled"]
+ found = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v2"])
+ )
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_wakeonlan_enabled_config_v2(self):
- entry = NETWORK_CONFIGS['wakeonlan_enabled']
- found = self._render_and_read(network_config=yaml.load(
- entry['yaml_v2']))
+ entry = NETWORK_CONFIGS["wakeonlan_enabled"]
+ found = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v2"])
+ )
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_render_v4_and_v6(self):
- entry = NETWORK_CONFIGS['v4_and_v6']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["v4_and_v6"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_render_v6_and_v4(self):
- entry = NETWORK_CONFIGS['v6_and_v4']
- found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["v6_and_v4"]
+ found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
class TestEniNetRendering(CiTestCase):
-
@mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot")
@mock.patch("cloudinit.net.sys_dev_path")
@mock.patch("cloudinit.net.read_sys_net")
@mock.patch("cloudinit.net.get_devicelist")
- def test_default_generation(self, mock_get_devicelist,
- mock_read_sys_net,
- mock_sys_dev_path, m_get_cmdline):
+ def test_default_generation(
+ self,
+ mock_get_devicelist,
+ mock_read_sys_net,
+ mock_sys_dev_path,
+ m_get_cmdline,
+ ):
tmp_dir = self.tmp_dir()
- _setup_test(tmp_dir, mock_get_devicelist,
- mock_read_sys_net, mock_sys_dev_path)
+ _setup_test(
+ tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path
+ )
network_cfg = net.generate_fallback_config()
- ns = network_state.parse_net_config_data(network_cfg,
- skip_broken=False)
+ ns = network_state.parse_net_config_data(
+ network_cfg, skip_broken=False
+ )
render_dir = os.path.join(tmp_dir, "render")
os.makedirs(render_dir)
renderer = eni.Renderer(
- {'eni_path': 'interfaces', 'netrules_path': None})
+ {"eni_path": "interfaces", "netrules_path": None}
+ )
renderer.render_network_state(ns, target=render_dir)
- self.assertTrue(os.path.exists(os.path.join(render_dir,
- 'interfaces')))
- with open(os.path.join(render_dir, 'interfaces')) as fh:
+ self.assertTrue(os.path.exists(os.path.join(render_dir, "interfaces")))
+ with open(os.path.join(render_dir, "interfaces")) as fh:
contents = fh.read()
expected = """
@@ -4004,62 +4783,74 @@ auto eth0
iface eth0 inet dhcp
"""
self.assertEqual(
- expected, dir2dict(tmp_dir)['/etc/network/interfaces'])
+ expected, dir2dict(tmp_dir)["/etc/network/interfaces"]
+ )
def test_v2_route_metric_to_eni(self):
"""Network v2 route-metric overrides are preserved in eni output"""
tmp_dir = self.tmp_dir()
renderer = eni.Renderer()
- expected_tmpl = textwrap.dedent("""\
+ expected_tmpl = textwrap.dedent(
+ """\
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet{suffix} dhcp
metric 100
- """)
- for dhcp_ver in ('dhcp4', 'dhcp6'):
- suffix = '6' if dhcp_ver == 'dhcp6' else ''
+ """
+ )
+ for dhcp_ver in ("dhcp4", "dhcp6"):
+ suffix = "6" if dhcp_ver == "dhcp6" else ""
dhcp_cfg = {
dhcp_ver: True,
- '{ver}-overrides'.format(ver=dhcp_ver): {'route-metric': 100}}
- v2_input = {'version': 2, 'ethernets': {'eth0': dhcp_cfg}}
+ "{ver}-overrides".format(ver=dhcp_ver): {"route-metric": 100},
+ }
+ v2_input = {"version": 2, "ethernets": {"eth0": dhcp_cfg}}
ns = network_state.parse_net_config_data(v2_input)
renderer.render_network_state(ns, target=tmp_dir)
self.assertEqual(
expected_tmpl.format(suffix=suffix),
- dir2dict(tmp_dir)['/etc/network/interfaces'])
+ dir2dict(tmp_dir)["/etc/network/interfaces"],
+ )
class TestNetplanNetRendering(CiTestCase):
-
@mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot")
@mock.patch("cloudinit.net.netplan._clean_default")
@mock.patch("cloudinit.net.sys_dev_path")
@mock.patch("cloudinit.net.read_sys_net")
@mock.patch("cloudinit.net.get_devicelist")
- def test_default_generation(self, mock_get_devicelist,
- mock_read_sys_net,
- mock_sys_dev_path,
- mock_clean_default, m_get_cmdline):
+ def test_default_generation(
+ self,
+ mock_get_devicelist,
+ mock_read_sys_net,
+ mock_sys_dev_path,
+ mock_clean_default,
+ m_get_cmdline,
+ ):
tmp_dir = self.tmp_dir()
- _setup_test(tmp_dir, mock_get_devicelist,
- mock_read_sys_net, mock_sys_dev_path)
+ _setup_test(
+ tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path
+ )
network_cfg = net.generate_fallback_config()
- ns = network_state.parse_net_config_data(network_cfg,
- skip_broken=False)
+ ns = network_state.parse_net_config_data(
+ network_cfg, skip_broken=False
+ )
render_dir = os.path.join(tmp_dir, "render")
os.makedirs(render_dir)
- render_target = 'netplan.yaml'
+ render_target = "netplan.yaml"
renderer = netplan.Renderer(
- {'netplan_path': render_target, 'postcmds': False})
+ {"netplan_path": render_target, "postcmds": False}
+ )
renderer.render_network_state(ns, target=render_dir)
- self.assertTrue(os.path.exists(os.path.join(render_dir,
- render_target)))
+ self.assertTrue(
+ os.path.exists(os.path.join(render_dir, render_target))
+ )
with open(os.path.join(render_dir, render_target)) as fh:
contents = fh.read()
print(contents)
@@ -4079,8 +4870,9 @@ network:
class TestNetplanCleanDefault(CiTestCase):
- snapd_known_path = 'etc/netplan/00-snapd-config.yaml'
- snapd_known_content = textwrap.dedent("""\
+ snapd_known_path = "etc/netplan/00-snapd-config.yaml"
+ snapd_known_content = textwrap.dedent(
+ """\
# This is the initial network config.
# It can be overwritten by cloud-init or console-conf.
network:
@@ -4094,15 +4886,18 @@ class TestNetplanCleanDefault(CiTestCase):
match:
name: "eth*"
dhcp4: true
- """)
+ """
+ )
stub_known = {
- 'run/systemd/network/10-netplan-all-en.network': 'foo-en',
- 'run/systemd/network/10-netplan-all-eth.network': 'foo-eth',
- 'run/systemd/generator/netplan.stamp': 'stamp',
+ "run/systemd/network/10-netplan-all-en.network": "foo-en",
+ "run/systemd/network/10-netplan-all-eth.network": "foo-eth",
+ "run/systemd/generator/netplan.stamp": "stamp",
}
def test_clean_known_config_cleaned(self):
- content = {self.snapd_known_path: self.snapd_known_content, }
+ content = {
+ self.snapd_known_path: self.snapd_known_content,
+ }
content.update(self.stub_known)
tmpd = self.tmp_dir()
files = sorted(populate_dir(tmpd, content))
@@ -4111,7 +4906,9 @@ class TestNetplanCleanDefault(CiTestCase):
self.assertEqual([], found)
def test_clean_unknown_config_not_cleaned(self):
- content = {self.snapd_known_path: self.snapd_known_content, }
+ content = {
+ self.snapd_known_path: self.snapd_known_content,
+ }
content.update(self.stub_known)
content[self.snapd_known_path] += "# user put a comment\n"
tmpd = self.tmp_dir()
@@ -4142,78 +4939,100 @@ class TestNetplanCleanDefault(CiTestCase):
class TestNetplanPostcommands(CiTestCase):
mycfg = {
- 'config': [{"type": "physical", "name": "eth0",
- "mac_address": "c0:d6:9f:2c:e8:80",
- "subnets": [{"type": "dhcp"}]}],
- 'version': 1}
-
- @mock.patch.object(netplan.Renderer, '_netplan_generate')
- @mock.patch.object(netplan.Renderer, '_net_setup_link')
- @mock.patch('cloudinit.subp.subp')
- def test_netplan_render_calls_postcmds(self, mock_subp,
- mock_netplan_generate,
- mock_net_setup_link):
+ "config": [
+ {
+ "type": "physical",
+ "name": "eth0",
+ "mac_address": "c0:d6:9f:2c:e8:80",
+ "subnets": [{"type": "dhcp"}],
+ }
+ ],
+ "version": 1,
+ }
+
+ @mock.patch.object(netplan.Renderer, "_netplan_generate")
+ @mock.patch.object(netplan.Renderer, "_net_setup_link")
+ @mock.patch("cloudinit.subp.subp")
+ def test_netplan_render_calls_postcmds(
+ self, mock_subp, mock_netplan_generate, mock_net_setup_link
+ ):
tmp_dir = self.tmp_dir()
- ns = network_state.parse_net_config_data(self.mycfg,
- skip_broken=False)
+ ns = network_state.parse_net_config_data(self.mycfg, skip_broken=False)
render_dir = os.path.join(tmp_dir, "render")
os.makedirs(render_dir)
- render_target = 'netplan.yaml'
+ render_target = "netplan.yaml"
renderer = netplan.Renderer(
- {'netplan_path': render_target, 'postcmds': True})
+ {"netplan_path": render_target, "postcmds": True}
+ )
mock_subp.side_effect = iter([subp.ProcessExecutionError])
renderer.render_network_state(ns, target=render_dir)
mock_netplan_generate.assert_called_with(run=True)
mock_net_setup_link.assert_called_with(run=True)
- @mock.patch('cloudinit.util.SeLinuxGuard')
+ @mock.patch("cloudinit.util.SeLinuxGuard")
@mock.patch.object(netplan, "get_devicelist")
- @mock.patch('cloudinit.subp.subp')
+ @mock.patch("cloudinit.subp.subp")
def test_netplan_postcmds(self, mock_subp, mock_devlist, mock_sel):
mock_sel.__enter__ = mock.Mock(return_value=False)
mock_sel.__exit__ = mock.Mock()
- mock_devlist.side_effect = [['lo']]
+ mock_devlist.side_effect = [["lo"]]
tmp_dir = self.tmp_dir()
- ns = network_state.parse_net_config_data(self.mycfg,
- skip_broken=False)
+ ns = network_state.parse_net_config_data(self.mycfg, skip_broken=False)
render_dir = os.path.join(tmp_dir, "render")
os.makedirs(render_dir)
- render_target = 'netplan.yaml'
+ render_target = "netplan.yaml"
renderer = netplan.Renderer(
- {'netplan_path': render_target, 'postcmds': True})
- mock_subp.side_effect = iter([
- subp.ProcessExecutionError,
- ('', ''),
- ('', ''),
- ])
+ {"netplan_path": render_target, "postcmds": True}
+ )
+ mock_subp.side_effect = iter(
+ [
+ subp.ProcessExecutionError,
+ ("", ""),
+ ("", ""),
+ ]
+ )
expected = [
- mock.call(['netplan', 'info'], capture=True),
- mock.call(['netplan', 'generate'], capture=True),
- mock.call(['udevadm', 'test-builtin', 'net_setup_link',
- '/sys/class/net/lo'], capture=True),
+ mock.call(["netplan", "info"], capture=True),
+ mock.call(["netplan", "generate"], capture=True),
+ mock.call(
+ [
+ "udevadm",
+ "test-builtin",
+ "net_setup_link",
+ "/sys/class/net/lo",
+ ],
+ capture=True,
+ ),
]
- with mock.patch.object(os.path, 'islink', return_value=True):
+ with mock.patch.object(os.path, "islink", return_value=True):
renderer.render_network_state(ns, target=render_dir)
mock_subp.assert_has_calls(expected)
class TestEniNetworkStateToEni(CiTestCase):
mycfg = {
- 'config': [{"type": "physical", "name": "eth0",
- "mac_address": "c0:d6:9f:2c:e8:80",
- "subnets": [{"type": "dhcp"}]}],
- 'version': 1}
- my_mac = 'c0:d6:9f:2c:e8:80'
+ "config": [
+ {
+ "type": "physical",
+ "name": "eth0",
+ "mac_address": "c0:d6:9f:2c:e8:80",
+ "subnets": [{"type": "dhcp"}],
+ }
+ ],
+ "version": 1,
+ }
+ my_mac = "c0:d6:9f:2c:e8:80"
def test_no_header(self):
rendered = eni.network_state_to_eni(
network_state=network_state.parse_net_config_data(self.mycfg),
- render_hwaddress=True)
+ render_hwaddress=True,
+ )
self.assertIn(self.my_mac, rendered)
self.assertIn("hwaddress", rendered)
@@ -4221,14 +5040,17 @@ class TestEniNetworkStateToEni(CiTestCase):
header = "# hello world\n"
rendered = eni.network_state_to_eni(
network_state=network_state.parse_net_config_data(self.mycfg),
- header=header, render_hwaddress=True)
+ header=header,
+ render_hwaddress=True,
+ )
self.assertIn(header, rendered)
self.assertIn(self.my_mac, rendered)
def test_no_hwaddress(self):
rendered = eni.network_state_to_eni(
network_state=network_state.parse_net_config_data(self.mycfg),
- render_hwaddress=False)
+ render_hwaddress=False,
+ )
self.assertNotIn(self.my_mac, rendered)
self.assertNotIn("hwaddress", rendered)
@@ -4237,156 +5059,241 @@ class TestCmdlineConfigParsing(CiTestCase):
with_logs = True
simple_cfg = {
- 'config': [{"type": "physical", "name": "eth0",
- "mac_address": "c0:d6:9f:2c:e8:80",
- "subnets": [{"type": "dhcp"}]}]}
+ "config": [
+ {
+ "type": "physical",
+ "name": "eth0",
+ "mac_address": "c0:d6:9f:2c:e8:80",
+ "subnets": [{"type": "dhcp"}],
+ }
+ ]
+ }
def test_cmdline_convert_dhcp(self):
found = cmdline._klibc_to_config_entry(DHCP_CONTENT_1)
- self.assertEqual(found, ('eth0', DHCP_EXPECTED_1))
+ self.assertEqual(found, ("eth0", DHCP_EXPECTED_1))
def test_cmdline_convert_dhcp6(self):
found = cmdline._klibc_to_config_entry(DHCP6_CONTENT_1)
- self.assertEqual(found, ('eno1', DHCP6_EXPECTED_1))
+ self.assertEqual(found, ("eno1", DHCP6_EXPECTED_1))
def test_cmdline_convert_static(self):
found = cmdline._klibc_to_config_entry(STATIC_CONTENT_1)
- self.assertEqual(found, ('eth1', STATIC_EXPECTED_1))
+ self.assertEqual(found, ("eth1", STATIC_EXPECTED_1))
def test_config_from_cmdline_net_cfg(self):
files = []
- pairs = (('net-eth0.cfg', DHCP_CONTENT_1),
- ('net-eth1.cfg', STATIC_CONTENT_1))
+ pairs = (
+ ("net-eth0.cfg", DHCP_CONTENT_1),
+ ("net-eth1.cfg", STATIC_CONTENT_1),
+ )
- macs = {'eth1': 'b8:ae:ed:75:ff:2b',
- 'eth0': 'b8:ae:ed:75:ff:2a'}
+ macs = {"eth1": "b8:ae:ed:75:ff:2b", "eth0": "b8:ae:ed:75:ff:2a"}
dhcp = copy.deepcopy(DHCP_EXPECTED_1)
- dhcp['mac_address'] = macs['eth0']
+ dhcp["mac_address"] = macs["eth0"]
static = copy.deepcopy(STATIC_EXPECTED_1)
- static['mac_address'] = macs['eth1']
+ static["mac_address"] = macs["eth1"]
- expected = {'version': 1, 'config': [dhcp, static]}
+ expected = {"version": 1, "config": [dhcp, static]}
with temp_utils.tempdir() as tmpd:
for fname, content in pairs:
fp = os.path.join(tmpd, fname)
files.append(fp)
util.write_file(fp, content)
- found = cmdline.config_from_klibc_net_cfg(files=files,
- mac_addrs=macs)
+ found = cmdline.config_from_klibc_net_cfg(
+ files=files, mac_addrs=macs
+ )
self.assertEqual(found, expected)
def test_cmdline_with_b64(self):
data = base64.b64encode(json.dumps(self.simple_cfg).encode())
encoded_text = data.decode()
- raw_cmdline = 'ro network-config=' + encoded_text + ' root=foo'
+ raw_cmdline = "ro network-config=" + encoded_text + " root=foo"
found = cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline)
self.assertEqual(found, self.simple_cfg)
def test_cmdline_with_net_config_disabled(self):
- raw_cmdline = 'ro network-config=disabled root=foo'
+ raw_cmdline = "ro network-config=disabled root=foo"
found = cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline)
- self.assertEqual(found, {'config': 'disabled'})
+ self.assertEqual(found, {"config": "disabled"})
def test_cmdline_with_net_config_unencoded_logs_error(self):
"""network-config cannot be unencoded besides 'disabled'."""
- raw_cmdline = 'ro network-config={config:disabled} root=foo'
+ raw_cmdline = "ro network-config={config:disabled} root=foo"
found = cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline)
self.assertIsNone(found)
expected_log = (
- 'ERROR: Expected base64 encoded kernel commandline parameter'
- ' network-config. Ignoring network-config={config:disabled}.')
+ "ERROR: Expected base64 encoded kernel commandline parameter"
+ " network-config. Ignoring network-config={config:disabled}."
+ )
self.assertIn(expected_log, self.logs.getvalue())
def test_cmdline_with_b64_gz(self):
data = _gzip_data(json.dumps(self.simple_cfg).encode())
encoded_text = base64.b64encode(data).decode()
- raw_cmdline = 'ro network-config=' + encoded_text + ' root=foo'
+ raw_cmdline = "ro network-config=" + encoded_text + " root=foo"
found = cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline)
self.assertEqual(found, self.simple_cfg)
class TestCmdlineKlibcNetworkConfigSource(FilesystemMockingTestCase):
macs = {
- 'eth0': '14:02:ec:42:48:00',
- 'eno1': '14:02:ec:42:48:01',
+ "eth0": "14:02:ec:42:48:00",
+ "eno1": "14:02:ec:42:48:01",
}
def test_without_ip(self):
- content = {'/run/net-eth0.conf': DHCP_CONTENT_1,
- cmdline._OPEN_ISCSI_INTERFACE_FILE: "eth0\n"}
+ content = {
+ "/run/net-eth0.conf": DHCP_CONTENT_1,
+ cmdline._OPEN_ISCSI_INTERFACE_FILE: "eth0\n",
+ }
exp1 = copy.deepcopy(DHCP_EXPECTED_1)
- exp1['mac_address'] = self.macs['eth0']
+ exp1["mac_address"] = self.macs["eth0"]
root = self.tmp_dir()
populate_dir(root, content)
self.reRoot(root)
src = cmdline.KlibcNetworkConfigSource(
- _cmdline='foo root=/root/bar', _mac_addrs=self.macs,
+ _cmdline="foo root=/root/bar",
+ _mac_addrs=self.macs,
)
self.assertTrue(src.is_applicable())
found = src.render_config()
- self.assertEqual(found['version'], 1)
- self.assertEqual(found['config'], [exp1])
+ self.assertEqual(found["version"], 1)
+ self.assertEqual(found["config"], [exp1])
def test_with_ip(self):
- content = {'/run/net-eth0.conf': DHCP_CONTENT_1}
+ content = {"/run/net-eth0.conf": DHCP_CONTENT_1}
exp1 = copy.deepcopy(DHCP_EXPECTED_1)
- exp1['mac_address'] = self.macs['eth0']
+ exp1["mac_address"] = self.macs["eth0"]
root = self.tmp_dir()
populate_dir(root, content)
self.reRoot(root)
src = cmdline.KlibcNetworkConfigSource(
- _cmdline='foo ip=dhcp', _mac_addrs=self.macs,
+ _cmdline="foo ip=dhcp",
+ _mac_addrs=self.macs,
)
self.assertTrue(src.is_applicable())
found = src.render_config()
- self.assertEqual(found['version'], 1)
- self.assertEqual(found['config'], [exp1])
+ self.assertEqual(found["version"], 1)
+ self.assertEqual(found["config"], [exp1])
def test_with_ip6(self):
- content = {'/run/net6-eno1.conf': DHCP6_CONTENT_1}
+ content = {"/run/net6-eno1.conf": DHCP6_CONTENT_1}
root = self.tmp_dir()
populate_dir(root, content)
self.reRoot(root)
src = cmdline.KlibcNetworkConfigSource(
- _cmdline='foo ip6=dhcp root=/dev/sda', _mac_addrs=self.macs,
+ _cmdline="foo ip6=dhcp root=/dev/sda",
+ _mac_addrs=self.macs,
)
self.assertTrue(src.is_applicable())
found = src.render_config()
self.assertEqual(
found,
- {'version': 1, 'config': [
- {'type': 'physical', 'name': 'eno1',
- 'mac_address': self.macs['eno1'],
- 'subnets': [
- {'dns_nameservers': ['2001:67c:1562:8010::2:1'],
- 'control': 'manual', 'type': 'dhcp6', 'netmask': '64'}]}]})
+ {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "eno1",
+ "mac_address": self.macs["eno1"],
+ "subnets": [
+ {
+ "dns_nameservers": ["2001:67c:1562:8010::2:1"],
+ "control": "manual",
+ "type": "dhcp6",
+ "netmask": "64",
+ }
+ ],
+ }
+ ],
+ },
+ )
def test_with_no_ip_or_ip6(self):
# if there is no ip= or ip6= on cmdline, return value should be None
- content = {'net6-eno1.conf': DHCP6_CONTENT_1}
+ content = {"net6-eno1.conf": DHCP6_CONTENT_1}
files = sorted(populate_dir(self.tmp_dir(), content))
src = cmdline.KlibcNetworkConfigSource(
- _files=files, _cmdline='foo root=/dev/sda', _mac_addrs=self.macs,
+ _files=files,
+ _cmdline="foo root=/dev/sda",
+ _mac_addrs=self.macs,
+ )
+ self.assertFalse(src.is_applicable())
+
+ def test_with_faux_ip(self):
+ content = {"net6-eno1.conf": DHCP6_CONTENT_1}
+ files = sorted(populate_dir(self.tmp_dir(), content))
+ src = cmdline.KlibcNetworkConfigSource(
+ _files=files,
+ _cmdline="foo iscsi_target_ip=root=/dev/sda",
+ _mac_addrs=self.macs,
+ )
+ self.assertFalse(src.is_applicable())
+
+ def test_empty_cmdline(self):
+ content = {"net6-eno1.conf": DHCP6_CONTENT_1}
+ files = sorted(populate_dir(self.tmp_dir(), content))
+ src = cmdline.KlibcNetworkConfigSource(
+ _files=files,
+ _cmdline="",
+ _mac_addrs=self.macs,
+ )
+ self.assertFalse(src.is_applicable())
+
+ def test_whitespace_cmdline(self):
+ content = {"net6-eno1.conf": DHCP6_CONTENT_1}
+ files = sorted(populate_dir(self.tmp_dir(), content))
+ src = cmdline.KlibcNetworkConfigSource(
+ _files=files,
+ _cmdline=" ",
+ _mac_addrs=self.macs,
+ )
+ self.assertFalse(src.is_applicable())
+
+ def test_cmdline_no_lhand(self):
+ content = {"net6-eno1.conf": DHCP6_CONTENT_1}
+ files = sorted(populate_dir(self.tmp_dir(), content))
+ src = cmdline.KlibcNetworkConfigSource(
+ _files=files,
+ _cmdline="=wut",
+ _mac_addrs=self.macs,
+ )
+ self.assertFalse(src.is_applicable())
+
+ def test_cmdline_embedded_ip(self):
+ content = {"net6-eno1.conf": DHCP6_CONTENT_1}
+ files = sorted(populate_dir(self.tmp_dir(), content))
+ src = cmdline.KlibcNetworkConfigSource(
+ _files=files,
+ _cmdline='opt="some things and ip=foo"',
+ _mac_addrs=self.macs,
)
self.assertFalse(src.is_applicable())
def test_with_both_ip_ip6(self):
content = {
- '/run/net-eth0.conf': DHCP_CONTENT_1,
- '/run/net6-eth0.conf': DHCP6_CONTENT_1.replace('eno1', 'eth0')}
+ "/run/net-eth0.conf": DHCP_CONTENT_1,
+ "/run/net6-eth0.conf": DHCP6_CONTENT_1.replace("eno1", "eth0"),
+ }
eth0 = copy.deepcopy(DHCP_EXPECTED_1)
- eth0['mac_address'] = self.macs['eth0']
- eth0['subnets'].append(
- {'control': 'manual', 'type': 'dhcp6',
- 'netmask': '64', 'dns_nameservers': ['2001:67c:1562:8010::2:1']})
+ eth0["mac_address"] = self.macs["eth0"]
+ eth0["subnets"].append(
+ {
+ "control": "manual",
+ "type": "dhcp6",
+ "netmask": "64",
+ "dns_nameservers": ["2001:67c:1562:8010::2:1"],
+ }
+ )
expected = [eth0]
root = self.tmp_dir()
@@ -4394,17 +5301,17 @@ class TestCmdlineKlibcNetworkConfigSource(FilesystemMockingTestCase):
self.reRoot(root)
src = cmdline.KlibcNetworkConfigSource(
- _cmdline='foo ip=dhcp ip6=dhcp', _mac_addrs=self.macs,
+ _cmdline="foo ip=dhcp ip6=dhcp",
+ _mac_addrs=self.macs,
)
self.assertTrue(src.is_applicable())
found = src.render_config()
- self.assertEqual(found['version'], 1)
- self.assertEqual(found['config'], expected)
+ self.assertEqual(found["version"], 1)
+ self.assertEqual(found["config"], expected)
class TestReadInitramfsConfig(CiTestCase):
-
def _config_source_cls_mock(self, is_applicable, render_config=None):
return lambda: mock.Mock(
is_applicable=lambda: is_applicable,
@@ -4412,7 +5319,7 @@ class TestReadInitramfsConfig(CiTestCase):
)
def test_no_sources(self):
- with mock.patch('cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES', []):
+ with mock.patch("cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES", []):
self.assertIsNone(cmdline.read_initramfs_config())
def test_no_applicable_sources(self):
@@ -4421,19 +5328,22 @@ class TestReadInitramfsConfig(CiTestCase):
self._config_source_cls_mock(is_applicable=False),
self._config_source_cls_mock(is_applicable=False),
]
- with mock.patch('cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES',
- sources):
+ with mock.patch(
+ "cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES", sources
+ ):
self.assertIsNone(cmdline.read_initramfs_config())
def test_one_applicable_source(self):
expected_config = object()
sources = [
self._config_source_cls_mock(
- is_applicable=True, render_config=expected_config,
+ is_applicable=True,
+ render_config=expected_config,
),
]
- with mock.patch('cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES',
- sources):
+ with mock.patch(
+ "cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES", sources
+ ):
self.assertEqual(expected_config, cmdline.read_initramfs_config())
def test_one_applicable_source_after_inapplicable_sources(self):
@@ -4442,45 +5352,53 @@ class TestReadInitramfsConfig(CiTestCase):
self._config_source_cls_mock(is_applicable=False),
self._config_source_cls_mock(is_applicable=False),
self._config_source_cls_mock(
- is_applicable=True, render_config=expected_config,
+ is_applicable=True,
+ render_config=expected_config,
),
]
- with mock.patch('cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES',
- sources):
+ with mock.patch(
+ "cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES", sources
+ ):
self.assertEqual(expected_config, cmdline.read_initramfs_config())
def test_first_applicable_source_is_used(self):
first_config, second_config = object(), object()
sources = [
self._config_source_cls_mock(
- is_applicable=True, render_config=first_config,
+ is_applicable=True,
+ render_config=first_config,
),
self._config_source_cls_mock(
- is_applicable=True, render_config=second_config,
+ is_applicable=True,
+ render_config=second_config,
),
]
- with mock.patch('cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES',
- sources):
+ with mock.patch(
+ "cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES", sources
+ ):
self.assertEqual(first_config, cmdline.read_initramfs_config())
class TestNetplanRoundTrip(CiTestCase):
- NETPLAN_INFO_OUT = textwrap.dedent("""
+ NETPLAN_INFO_OUT = textwrap.dedent(
+ """
netplan.io:
features:
- dhcp-use-domains
- ipv6-mtu
website: https://netplan.io/
- """)
+ """
+ )
def setUp(self):
super(TestNetplanRoundTrip, self).setUp()
- self.add_patch('cloudinit.net.netplan.subp.subp', 'm_subp')
- self.m_subp.return_value = (self.NETPLAN_INFO_OUT, '')
+ self.add_patch("cloudinit.net.netplan.subp.subp", "m_subp")
+ self.m_subp.return_value = (self.NETPLAN_INFO_OUT, "")
- def _render_and_read(self, network_config=None, state=None,
- netplan_path=None, target=None):
+ def _render_and_read(
+ self, network_config=None, state=None, netplan_path=None, target=None
+ ):
if target is None:
target = self.tmp_dir()
@@ -4492,188 +5410,212 @@ class TestNetplanRoundTrip(CiTestCase):
raise ValueError("Expected data or state, got neither")
if netplan_path is None:
- netplan_path = 'etc/netplan/50-cloud-init.yaml'
+ netplan_path = "etc/netplan/50-cloud-init.yaml"
- renderer = netplan.Renderer(
- config={'netplan_path': netplan_path})
+ renderer = netplan.Renderer(config={"netplan_path": netplan_path})
renderer.render_network_state(ns, target=target)
return dir2dict(target)
def testsimple_render_bond_netplan(self):
- entry = NETWORK_CONFIGS['bond']
- files = self._render_and_read(network_config=yaml.load(entry['yaml']))
- print(entry['expected_netplan'])
- print('-- expected ^ | v rendered --')
- print(files['/etc/netplan/50-cloud-init.yaml'])
+ entry = NETWORK_CONFIGS["bond"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
+ print(entry["expected_netplan"])
+ print("-- expected ^ | v rendered --")
+ print(files["/etc/netplan/50-cloud-init.yaml"])
self.assertEqual(
- entry['expected_netplan'].splitlines(),
- files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ entry["expected_netplan"].splitlines(),
+ files["/etc/netplan/50-cloud-init.yaml"].splitlines(),
+ )
def testsimple_render_bond_v2_input_netplan(self):
- entry = NETWORK_CONFIGS['bond']
+ entry = NETWORK_CONFIGS["bond"]
files = self._render_and_read(
- network_config=yaml.load(entry['yaml-v2']))
- print(entry['expected_netplan-v2'])
- print('-- expected ^ | v rendered --')
- print(files['/etc/netplan/50-cloud-init.yaml'])
+ network_config=yaml.load(entry["yaml-v2"])
+ )
+ print(entry["expected_netplan-v2"])
+ print("-- expected ^ | v rendered --")
+ print(files["/etc/netplan/50-cloud-init.yaml"])
self.assertEqual(
- entry['expected_netplan-v2'].splitlines(),
- files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ entry["expected_netplan-v2"].splitlines(),
+ files["/etc/netplan/50-cloud-init.yaml"].splitlines(),
+ )
def testsimple_render_small_netplan(self):
- entry = NETWORK_CONFIGS['small']
- files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["small"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self.assertEqual(
- entry['expected_netplan'].splitlines(),
- files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ entry["expected_netplan"].splitlines(),
+ files["/etc/netplan/50-cloud-init.yaml"].splitlines(),
+ )
def testsimple_render_v4_and_v6(self):
- entry = NETWORK_CONFIGS['v4_and_v6']
- files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["v4_and_v6"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self.assertEqual(
- entry['expected_netplan'].splitlines(),
- files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ entry["expected_netplan"].splitlines(),
+ files["/etc/netplan/50-cloud-init.yaml"].splitlines(),
+ )
def testsimple_render_v4_and_v6_static(self):
- entry = NETWORK_CONFIGS['v4_and_v6_static']
- files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["v4_and_v6_static"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self.assertEqual(
- entry['expected_netplan'].splitlines(),
- files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ entry["expected_netplan"].splitlines(),
+ files["/etc/netplan/50-cloud-init.yaml"].splitlines(),
+ )
def testsimple_render_dhcpv6_only(self):
- entry = NETWORK_CONFIGS['dhcpv6_only']
- files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["dhcpv6_only"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self.assertEqual(
- entry['expected_netplan'].splitlines(),
- files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ entry["expected_netplan"].splitlines(),
+ files["/etc/netplan/50-cloud-init.yaml"].splitlines(),
+ )
def testsimple_render_dhcpv6_accept_ra(self):
- entry = NETWORK_CONFIGS['dhcpv6_accept_ra']
- files = self._render_and_read(network_config=yaml.load(
- entry['yaml_v1']))
+ entry = NETWORK_CONFIGS["dhcpv6_accept_ra"]
+ files = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v1"])
+ )
self.assertEqual(
- entry['expected_netplan'].splitlines(),
- files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ entry["expected_netplan"].splitlines(),
+ files["/etc/netplan/50-cloud-init.yaml"].splitlines(),
+ )
def testsimple_render_dhcpv6_reject_ra(self):
- entry = NETWORK_CONFIGS['dhcpv6_reject_ra']
- files = self._render_and_read(network_config=yaml.load(
- entry['yaml_v1']))
+ entry = NETWORK_CONFIGS["dhcpv6_reject_ra"]
+ files = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v1"])
+ )
self.assertEqual(
- entry['expected_netplan'].splitlines(),
- files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ entry["expected_netplan"].splitlines(),
+ files["/etc/netplan/50-cloud-init.yaml"].splitlines(),
+ )
def testsimple_render_ipv6_slaac(self):
- entry = NETWORK_CONFIGS['ipv6_slaac']
- files = self._render_and_read(network_config=yaml.load(
- entry['yaml']))
+ entry = NETWORK_CONFIGS["ipv6_slaac"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self.assertEqual(
- entry['expected_netplan'].splitlines(),
- files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ entry["expected_netplan"].splitlines(),
+ files["/etc/netplan/50-cloud-init.yaml"].splitlines(),
+ )
def testsimple_render_dhcpv6_stateless(self):
- entry = NETWORK_CONFIGS['dhcpv6_stateless']
- files = self._render_and_read(network_config=yaml.load(
- entry['yaml']))
+ entry = NETWORK_CONFIGS["dhcpv6_stateless"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self.assertEqual(
- entry['expected_netplan'].splitlines(),
- files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ entry["expected_netplan"].splitlines(),
+ files["/etc/netplan/50-cloud-init.yaml"].splitlines(),
+ )
def testsimple_render_dhcpv6_stateful(self):
- entry = NETWORK_CONFIGS['dhcpv6_stateful']
- files = self._render_and_read(network_config=yaml.load(
- entry['yaml']))
+ entry = NETWORK_CONFIGS["dhcpv6_stateful"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self.assertEqual(
- entry['expected_netplan'].splitlines(),
- files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ entry["expected_netplan"].splitlines(),
+ files["/etc/netplan/50-cloud-init.yaml"].splitlines(),
+ )
def testsimple_wakeonlan_disabled_config_v2(self):
- entry = NETWORK_CONFIGS['wakeonlan_disabled']
- files = self._render_and_read(network_config=yaml.load(
- entry['yaml_v2']))
+ entry = NETWORK_CONFIGS["wakeonlan_disabled"]
+ files = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v2"])
+ )
self.assertEqual(
- entry['expected_netplan'].splitlines(),
- files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ entry["expected_netplan"].splitlines(),
+ files["/etc/netplan/50-cloud-init.yaml"].splitlines(),
+ )
def testsimple_wakeonlan_enabled_config_v2(self):
- entry = NETWORK_CONFIGS['wakeonlan_enabled']
- files = self._render_and_read(network_config=yaml.load(
- entry['yaml_v2']))
+ entry = NETWORK_CONFIGS["wakeonlan_enabled"]
+ files = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v2"])
+ )
self.assertEqual(
- entry['expected_netplan'].splitlines(),
- files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ entry["expected_netplan"].splitlines(),
+ files["/etc/netplan/50-cloud-init.yaml"].splitlines(),
+ )
def testsimple_render_all(self):
- entry = NETWORK_CONFIGS['all']
- files = self._render_and_read(network_config=yaml.load(entry['yaml']))
- print(entry['expected_netplan'])
- print('-- expected ^ | v rendered --')
- print(files['/etc/netplan/50-cloud-init.yaml'])
+ entry = NETWORK_CONFIGS["all"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
+ print(entry["expected_netplan"])
+ print("-- expected ^ | v rendered --")
+ print(files["/etc/netplan/50-cloud-init.yaml"])
self.assertEqual(
- entry['expected_netplan'].splitlines(),
- files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ entry["expected_netplan"].splitlines(),
+ files["/etc/netplan/50-cloud-init.yaml"].splitlines(),
+ )
def testsimple_render_manual(self):
- entry = NETWORK_CONFIGS['manual']
- files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["manual"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self.assertEqual(
- entry['expected_netplan'].splitlines(),
- files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ entry["expected_netplan"].splitlines(),
+ files["/etc/netplan/50-cloud-init.yaml"].splitlines(),
+ )
def test_render_output_has_yaml_no_aliases(self):
entry = {
- 'yaml': V1_NAMESERVER_ALIAS,
- 'expected_netplan': NETPLAN_NO_ALIAS,
+ "yaml": V1_NAMESERVER_ALIAS,
+ "expected_netplan": NETPLAN_NO_ALIAS,
}
- network_config = yaml.load(entry['yaml'])
+ network_config = yaml.load(entry["yaml"])
ns = network_state.parse_net_config_data(network_config)
files = self._render_and_read(state=ns)
# check for alias
- content = files['/etc/netplan/50-cloud-init.yaml']
+ content = files["/etc/netplan/50-cloud-init.yaml"]
# test-load the yaml to ensure we don't render something not loadable
# this allows single aliases, but not duplicate ones
- parsed = yaml.load(files['/etc/netplan/50-cloud-init.yaml'])
+ parsed = yaml.load(files["/etc/netplan/50-cloud-init.yaml"])
self.assertNotEqual(None, parsed)
# now look for any alias, avoid rendering them entirely
# generate the first anchor string using the template
# as of this writing, looks like "&id001"
- anchor = r'&' + Serializer.ANCHOR_TEMPLATE % 1
+ anchor = r"&" + Serializer.ANCHOR_TEMPLATE % 1
found_alias = re.search(anchor, content, re.MULTILINE)
if found_alias:
msg = "Error at: %s\nContent:\n%s" % (found_alias, content)
- raise ValueError('Found yaml alias in rendered netplan: ' + msg)
+ raise ValueError("Found yaml alias in rendered netplan: " + msg)
- print(entry['expected_netplan'])
- print('-- expected ^ | v rendered --')
- print(files['/etc/netplan/50-cloud-init.yaml'])
+ print(entry["expected_netplan"])
+ print("-- expected ^ | v rendered --")
+ print(files["/etc/netplan/50-cloud-init.yaml"])
self.assertEqual(
- entry['expected_netplan'].splitlines(),
- files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ entry["expected_netplan"].splitlines(),
+ files["/etc/netplan/50-cloud-init.yaml"].splitlines(),
+ )
def test_render_output_supports_both_grat_arp_spelling(self):
entry = {
- 'yaml': NETPLAN_BOND_GRAT_ARP,
- 'expected_netplan': NETPLAN_BOND_GRAT_ARP.replace('gratuitous',
- 'gratuitious'),
+ "yaml": NETPLAN_BOND_GRAT_ARP,
+ "expected_netplan": NETPLAN_BOND_GRAT_ARP.replace(
+ "gratuitous", "gratuitious"
+ ),
}
- network_config = yaml.load(entry['yaml']).get('network')
+ network_config = yaml.load(entry["yaml"]).get("network")
files = self._render_and_read(network_config=network_config)
- print(entry['expected_netplan'])
- print('-- expected ^ | v rendered --')
- print(files['/etc/netplan/50-cloud-init.yaml'])
+ print(entry["expected_netplan"])
+ print("-- expected ^ | v rendered --")
+ print(files["/etc/netplan/50-cloud-init.yaml"])
self.assertEqual(
- entry['expected_netplan'].splitlines(),
- files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ entry["expected_netplan"].splitlines(),
+ files["/etc/netplan/50-cloud-init.yaml"].splitlines(),
+ )
class TestEniRoundTrip(CiTestCase):
-
- def _render_and_read(self, network_config=None, state=None, eni_path=None,
- netrules_path=None, dir=None):
+ def _render_and_read(
+ self,
+ network_config=None,
+ state=None,
+ eni_path=None,
+ netrules_path=None,
+ dir=None,
+ ):
if dir is None:
dir = self.tmp_dir()
@@ -4685,10 +5627,11 @@ class TestEniRoundTrip(CiTestCase):
raise ValueError("Expected data or state, got neither")
if eni_path is None:
- eni_path = 'etc/network/interfaces'
+ eni_path = "etc/network/interfaces"
renderer = eni.Renderer(
- config={'eni_path': eni_path, 'netrules_path': netrules_path})
+ config={"eni_path": eni_path, "netrules_path": netrules_path}
+ )
renderer.render_network_state(ns, target=dir)
return dir2dict(dir)
@@ -4698,95 +5641,112 @@ class TestEniRoundTrip(CiTestCase):
files = self._render_and_read(network_config=network_config)
self.assertEqual(
RENDERED_ENI.splitlines(),
- files['/etc/network/interfaces'].splitlines())
+ files["/etc/network/interfaces"].splitlines(),
+ )
def testsimple_render_all(self):
- entry = NETWORK_CONFIGS['all']
- files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["all"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self.assertEqual(
- entry['expected_eni'].splitlines(),
- files['/etc/network/interfaces'].splitlines())
+ entry["expected_eni"].splitlines(),
+ files["/etc/network/interfaces"].splitlines(),
+ )
def testsimple_render_small(self):
- entry = NETWORK_CONFIGS['small']
- files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["small"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self.assertEqual(
- entry['expected_eni'].splitlines(),
- files['/etc/network/interfaces'].splitlines())
+ entry["expected_eni"].splitlines(),
+ files["/etc/network/interfaces"].splitlines(),
+ )
def testsimple_render_v4_and_v6(self):
- entry = NETWORK_CONFIGS['v4_and_v6']
- files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["v4_and_v6"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self.assertEqual(
- entry['expected_eni'].splitlines(),
- files['/etc/network/interfaces'].splitlines())
+ entry["expected_eni"].splitlines(),
+ files["/etc/network/interfaces"].splitlines(),
+ )
def testsimple_render_dhcpv6_only(self):
- entry = NETWORK_CONFIGS['dhcpv6_only']
- files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["dhcpv6_only"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self.assertEqual(
- entry['expected_eni'].splitlines(),
- files['/etc/network/interfaces'].splitlines())
+ entry["expected_eni"].splitlines(),
+ files["/etc/network/interfaces"].splitlines(),
+ )
def testsimple_render_v4_and_v6_static(self):
- entry = NETWORK_CONFIGS['v4_and_v6_static']
- files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["v4_and_v6_static"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self.assertEqual(
- entry['expected_eni'].splitlines(),
- files['/etc/network/interfaces'].splitlines())
+ entry["expected_eni"].splitlines(),
+ files["/etc/network/interfaces"].splitlines(),
+ )
def testsimple_render_dhcpv6_stateless(self):
- entry = NETWORK_CONFIGS['dhcpv6_stateless']
- files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["dhcpv6_stateless"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self.assertEqual(
- entry['expected_eni'].splitlines(),
- files['/etc/network/interfaces'].splitlines())
+ entry["expected_eni"].splitlines(),
+ files["/etc/network/interfaces"].splitlines(),
+ )
def testsimple_render_ipv6_slaac(self):
- entry = NETWORK_CONFIGS['ipv6_slaac']
- files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["ipv6_slaac"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self.assertEqual(
- entry['expected_eni'].splitlines(),
- files['/etc/network/interfaces'].splitlines())
+ entry["expected_eni"].splitlines(),
+ files["/etc/network/interfaces"].splitlines(),
+ )
def testsimple_render_dhcpv6_stateful(self):
- entry = NETWORK_CONFIGS['dhcpv6_stateless']
- files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["dhcpv6_stateless"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self.assertEqual(
- entry['expected_eni'].splitlines(),
- files['/etc/network/interfaces'].splitlines())
+ entry["expected_eni"].splitlines(),
+ files["/etc/network/interfaces"].splitlines(),
+ )
def testsimple_render_dhcpv6_accept_ra(self):
- entry = NETWORK_CONFIGS['dhcpv6_accept_ra']
- files = self._render_and_read(network_config=yaml.load(
- entry['yaml_v1']))
+ entry = NETWORK_CONFIGS["dhcpv6_accept_ra"]
+ files = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v1"])
+ )
self.assertEqual(
- entry['expected_eni'].splitlines(),
- files['/etc/network/interfaces'].splitlines())
+ entry["expected_eni"].splitlines(),
+ files["/etc/network/interfaces"].splitlines(),
+ )
def testsimple_render_dhcpv6_reject_ra(self):
- entry = NETWORK_CONFIGS['dhcpv6_reject_ra']
- files = self._render_and_read(network_config=yaml.load(
- entry['yaml_v1']))
+ entry = NETWORK_CONFIGS["dhcpv6_reject_ra"]
+ files = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v1"])
+ )
self.assertEqual(
- entry['expected_eni'].splitlines(),
- files['/etc/network/interfaces'].splitlines())
+ entry["expected_eni"].splitlines(),
+ files["/etc/network/interfaces"].splitlines(),
+ )
def testsimple_wakeonlan_disabled_config_v2(self):
- entry = NETWORK_CONFIGS['wakeonlan_disabled']
- files = self._render_and_read(network_config=yaml.load(
- entry['yaml_v2']))
+ entry = NETWORK_CONFIGS["wakeonlan_disabled"]
+ files = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v2"])
+ )
self.assertEqual(
- entry['expected_eni'].splitlines(),
- files['/etc/network/interfaces'].splitlines())
+ entry["expected_eni"].splitlines(),
+ files["/etc/network/interfaces"].splitlines(),
+ )
def testsimple_wakeonlan_enabled_config_v2(self):
- entry = NETWORK_CONFIGS['wakeonlan_enabled']
- files = self._render_and_read(network_config=yaml.load(
- entry['yaml_v2']))
+ entry = NETWORK_CONFIGS["wakeonlan_enabled"]
+ files = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v2"])
+ )
self.assertEqual(
- entry['expected_eni'].splitlines(),
- files['/etc/network/interfaces'].splitlines())
+ entry["expected_eni"].splitlines(),
+ files["/etc/network/interfaces"].splitlines(),
+ )
def testsimple_render_manual(self):
"""Test rendering of 'manual' for 'type' and 'control'.
@@ -4796,165 +5756,471 @@ class TestEniRoundTrip(CiTestCase):
if there were no addresses to configure. Also strange is the fact
that in order to apply that MTU the ifupdown device must be set
to 'auto', or the MTU would not be set."""
- entry = NETWORK_CONFIGS['manual']
- files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["manual"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self.assertEqual(
- entry['expected_eni'].splitlines(),
- files['/etc/network/interfaces'].splitlines())
+ entry["expected_eni"].splitlines(),
+ files["/etc/network/interfaces"].splitlines(),
+ )
def test_routes_rendered(self):
# as reported in bug 1649652
conf = [
- {'name': 'eth0', 'type': 'physical',
- 'subnets': [{
- 'address': '172.23.31.42/26',
- 'dns_nameservers': [], 'gateway': '172.23.31.2',
- 'type': 'static'}]},
- {'type': 'route', 'id': 4,
- 'metric': 0, 'destination': '10.0.0.0/12',
- 'gateway': '172.23.31.1'},
- {'type': 'route', 'id': 5,
- 'metric': 0, 'destination': '192.168.2.0/16',
- 'gateway': '172.23.31.1'},
- {'type': 'route', 'id': 6,
- 'metric': 1, 'destination': '10.0.200.0/16',
- 'gateway': '172.23.31.1'},
+ {
+ "name": "eth0",
+ "type": "physical",
+ "subnets": [
+ {
+ "address": "172.23.31.42/26",
+ "dns_nameservers": [],
+ "gateway": "172.23.31.2",
+ "type": "static",
+ }
+ ],
+ },
+ {
+ "type": "route",
+ "id": 4,
+ "metric": 0,
+ "destination": "10.0.0.0/12",
+ "gateway": "172.23.31.1",
+ },
+ {
+ "type": "route",
+ "id": 5,
+ "metric": 0,
+ "destination": "192.168.2.0/16",
+ "gateway": "172.23.31.1",
+ },
+ {
+ "type": "route",
+ "id": 6,
+ "metric": 1,
+ "destination": "10.0.200.0/16",
+ "gateway": "172.23.31.1",
+ },
+ {
+ "type": "route",
+ "id": 7,
+ "metric": 1,
+ "destination": "10.0.0.100/32",
+ "gateway": "172.23.31.1",
+ },
]
files = self._render_and_read(
- network_config={'config': conf, 'version': 1})
+ network_config={"config": conf, "version": 1}
+ )
expected = [
- 'auto lo',
- 'iface lo inet loopback',
- 'auto eth0',
- 'iface eth0 inet static',
- ' address 172.23.31.42/26',
- ' gateway 172.23.31.2',
- ('post-up route add -net 10.0.0.0/12 gw '
- '172.23.31.1 metric 0 || true'),
- ('pre-down route del -net 10.0.0.0/12 gw '
- '172.23.31.1 metric 0 || true'),
- ('post-up route add -net 192.168.2.0/16 gw '
- '172.23.31.1 metric 0 || true'),
- ('pre-down route del -net 192.168.2.0/16 gw '
- '172.23.31.1 metric 0 || true'),
- ('post-up route add -net 10.0.200.0/16 gw '
- '172.23.31.1 metric 1 || true'),
- ('pre-down route del -net 10.0.200.0/16 gw '
- '172.23.31.1 metric 1 || true'),
+ "auto lo",
+ "iface lo inet loopback",
+ "auto eth0",
+ "iface eth0 inet static",
+ " address 172.23.31.42/26",
+ " gateway 172.23.31.2",
+ "post-up route add -net 10.0.0.0/12 gw "
+ "172.23.31.1 metric 0 || true",
+ "pre-down route del -net 10.0.0.0/12 gw "
+ "172.23.31.1 metric 0 || true",
+ "post-up route add -net 192.168.2.0/16 gw "
+ "172.23.31.1 metric 0 || true",
+ "pre-down route del -net 192.168.2.0/16 gw "
+ "172.23.31.1 metric 0 || true",
+ "post-up route add -net 10.0.200.0/16 gw "
+ "172.23.31.1 metric 1 || true",
+ "pre-down route del -net 10.0.200.0/16 gw "
+ "172.23.31.1 metric 1 || true",
+ "post-up route add -host 10.0.0.100/32 gw "
+ "172.23.31.1 metric 1 || true",
+ "pre-down route del -host 10.0.0.100/32 gw "
+ "172.23.31.1 metric 1 || true",
]
- found = files['/etc/network/interfaces'].splitlines()
+ found = files["/etc/network/interfaces"].splitlines()
- self.assertEqual(
- expected, [line for line in found if line])
+ self.assertEqual(expected, [line for line in found if line])
def test_ipv6_static_routes(self):
# as reported in bug 1818669
conf = [
- {'name': 'eno3', 'type': 'physical',
- 'subnets': [{
- 'address': 'fd00::12/64',
- 'dns_nameservers': ['fd00:2::15'],
- 'gateway': 'fd00::1',
- 'ipv6': True,
- 'type': 'static',
- 'routes': [{'netmask': '32',
- 'network': 'fd00:12::',
- 'gateway': 'fd00::2'},
- {'network': 'fd00:14::',
- 'gateway': 'fd00::3'},
- {'destination': 'fe00:14::/48',
- 'gateway': 'fe00::4',
- 'metric': 500},
- {'gateway': '192.168.23.1',
- 'metric': 999,
- 'netmask': 24,
- 'network': '192.168.23.0'},
- {'destination': '10.23.23.0/24',
- 'gateway': '10.23.23.2',
- 'metric': 300}]}]},
+ {
+ "name": "eno3",
+ "type": "physical",
+ "subnets": [
+ {
+ "address": "fd00::12/64",
+ "dns_nameservers": ["fd00:2::15"],
+ "gateway": "fd00::1",
+ "ipv6": True,
+ "type": "static",
+ "routes": [
+ {
+ "netmask": "32",
+ "network": "fd00:12::",
+ "gateway": "fd00::2",
+ },
+ {"network": "fd00:14::", "gateway": "fd00::3"},
+ {
+ "destination": "fe00:14::/48",
+ "gateway": "fe00::4",
+ "metric": 500,
+ },
+ {
+ "gateway": "192.168.23.1",
+ "metric": 999,
+ "netmask": 24,
+ "network": "192.168.23.0",
+ },
+ {
+ "destination": "10.23.23.0/24",
+ "gateway": "10.23.23.2",
+ "metric": 300,
+ },
+ ],
+ }
+ ],
+ },
]
files = self._render_and_read(
- network_config={'config': conf, 'version': 1})
+ network_config={"config": conf, "version": 1}
+ )
expected = [
- 'auto lo',
- 'iface lo inet loopback',
- 'auto eno3',
- 'iface eno3 inet6 static',
- ' address fd00::12/64',
- ' dns-nameservers fd00:2::15',
- ' gateway fd00::1',
- (' post-up route add -A inet6 fd00:12::/32 gw '
- 'fd00::2 || true'),
- (' pre-down route del -A inet6 fd00:12::/32 gw '
- 'fd00::2 || true'),
- (' post-up route add -A inet6 fd00:14::/64 gw '
- 'fd00::3 || true'),
- (' pre-down route del -A inet6 fd00:14::/64 gw '
- 'fd00::3 || true'),
- (' post-up route add -A inet6 fe00:14::/48 gw '
- 'fe00::4 metric 500 || true'),
- (' pre-down route del -A inet6 fe00:14::/48 gw '
- 'fe00::4 metric 500 || true'),
- (' post-up route add -net 192.168.23.0/24 gw '
- '192.168.23.1 metric 999 || true'),
- (' pre-down route del -net 192.168.23.0/24 gw '
- '192.168.23.1 metric 999 || true'),
- (' post-up route add -net 10.23.23.0/24 gw '
- '10.23.23.2 metric 300 || true'),
- (' pre-down route del -net 10.23.23.0/24 gw '
- '10.23.23.2 metric 300 || true'),
-
+ "auto lo",
+ "iface lo inet loopback",
+ "auto eno3",
+ "iface eno3 inet6 static",
+ " address fd00::12/64",
+ " dns-nameservers fd00:2::15",
+ " gateway fd00::1",
+ " post-up route add -A inet6 fd00:12::/32 gw fd00::2 || true",
+ " pre-down route del -A inet6 fd00:12::/32 gw fd00::2 || true",
+ " post-up route add -A inet6 fd00:14::/64 gw fd00::3 || true",
+ " pre-down route del -A inet6 fd00:14::/64 gw fd00::3 || true",
+ " post-up route add -A inet6 fe00:14::/48 gw "
+ "fe00::4 metric 500 || true",
+ " pre-down route del -A inet6 fe00:14::/48 gw "
+ "fe00::4 metric 500 || true",
+ " post-up route add -net 192.168.23.0/24 gw "
+ "192.168.23.1 metric 999 || true",
+ " pre-down route del -net 192.168.23.0/24 gw "
+ "192.168.23.1 metric 999 || true",
+ " post-up route add -net 10.23.23.0/24 gw "
+ "10.23.23.2 metric 300 || true",
+ " pre-down route del -net 10.23.23.0/24 gw "
+ "10.23.23.2 metric 300 || true",
]
- found = files['/etc/network/interfaces'].splitlines()
+ found = files["/etc/network/interfaces"].splitlines()
- self.assertEqual(
- expected, [line for line in found if line])
+ self.assertEqual(expected, [line for line in found if line])
def testsimple_render_bond(self):
- entry = NETWORK_CONFIGS['bond']
- files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ entry = NETWORK_CONFIGS["bond"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
self.assertEqual(
- entry['expected_eni'].splitlines(),
- files['/etc/network/interfaces'].splitlines())
+ entry["expected_eni"].splitlines(),
+ files["/etc/network/interfaces"].splitlines(),
+ )
-class TestRenderersSelect:
+class TestNetworkdNetRendering(CiTestCase):
+ def create_conf_dict(self, contents):
+ content_dict = {}
+ for line in contents:
+ if line:
+ line = line.strip()
+ if line and re.search(r"^\[(.+)\]$", line):
+ content_dict[line] = []
+ key = line
+ elif line:
+ content_dict[key].append(line)
+
+ return content_dict
+
+ def compare_dicts(self, actual, expected):
+ for k, v in actual.items():
+ self.assertEqual(sorted(expected[k]), sorted(v))
+
+ @mock.patch("cloudinit.net.util.chownbyname", return_value=True)
+ @mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot")
+ @mock.patch("cloudinit.net.sys_dev_path")
+ @mock.patch("cloudinit.net.read_sys_net")
+ @mock.patch("cloudinit.net.get_devicelist")
+ def test_networkd_default_generation(
+ self,
+ mock_get_devicelist,
+ mock_read_sys_net,
+ mock_sys_dev_path,
+ m_get_cmdline,
+ m_chown,
+ ):
+ tmp_dir = self.tmp_dir()
+ _setup_test(
+ tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path
+ )
+
+ network_cfg = net.generate_fallback_config()
+ ns = network_state.parse_net_config_data(
+ network_cfg, skip_broken=False
+ )
+
+ render_dir = os.path.join(tmp_dir, "render")
+ os.makedirs(render_dir)
+
+ render_target = "etc/systemd/network/10-cloud-init-eth1000.network"
+ renderer = networkd.Renderer({})
+ renderer.render_network_state(ns, target=render_dir)
+
+ self.assertTrue(
+ os.path.exists(os.path.join(render_dir, render_target))
+ )
+ with open(os.path.join(render_dir, render_target)) as fh:
+ contents = fh.readlines()
+
+ actual = self.create_conf_dict(contents)
+ print(actual)
+
+ expected = textwrap.dedent(
+ """\
+ [Match]
+ Name=eth1000
+ MACAddress=07-1c-c6-75-a4-be
+ [Network]
+ DHCP=ipv4"""
+ ).rstrip(" ")
+
+ expected = self.create_conf_dict(expected.splitlines())
+
+ self.compare_dicts(actual, expected)
+
+
+class TestNetworkdRoundTrip(CiTestCase):
+ def create_conf_dict(self, contents):
+ content_dict = {}
+ for line in contents:
+ if line:
+ line = line.strip()
+ if line and re.search(r"^\[(.+)\]$", line):
+ content_dict[line] = []
+ key = line
+ elif line:
+ content_dict[key].append(line)
+
+ return content_dict
+
+ def compare_dicts(self, actual, expected):
+ for k, v in actual.items():
+ self.assertEqual(sorted(expected[k]), sorted(v))
+
+ def _render_and_read(
+ self, network_config=None, state=None, nwkd_path=None, dir=None
+ ):
+ if dir is None:
+ dir = self.tmp_dir()
+
+ if network_config:
+ ns = network_state.parse_net_config_data(network_config)
+ elif state:
+ ns = state
+ else:
+ raise ValueError("Expected data or state, got neither")
+
+ if not nwkd_path:
+ nwkd_path = "/etc/systemd/network/"
+
+ renderer = networkd.Renderer(config={"network_conf_dir": nwkd_path})
+
+ renderer.render_network_state(ns, target=dir)
+ return dir2dict(dir)
+
+ @mock.patch("cloudinit.net.util.chownbyname", return_value=True)
+ def testsimple_render_small_networkd(self, m_chown):
+ nwk_fn1 = "/etc/systemd/network/10-cloud-init-eth99.network"
+ nwk_fn2 = "/etc/systemd/network/10-cloud-init-eth1.network"
+ entry = NETWORK_CONFIGS["small"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
+
+ actual = files[nwk_fn1].splitlines()
+ actual = self.create_conf_dict(actual)
+
+ expected = entry["expected_networkd_eth99"].splitlines()
+ expected = self.create_conf_dict(expected)
+
+ self.compare_dicts(actual, expected)
+
+ actual = files[nwk_fn2].splitlines()
+ actual = self.create_conf_dict(actual)
+
+ expected = entry["expected_networkd_eth1"].splitlines()
+ expected = self.create_conf_dict(expected)
+
+ self.compare_dicts(actual, expected)
+
+ @mock.patch("cloudinit.net.util.chownbyname", return_value=True)
+ def testsimple_render_v4_and_v6(self, m_chown):
+ nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network"
+ entry = NETWORK_CONFIGS["v4_and_v6"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
+
+ actual = files[nwk_fn].splitlines()
+ actual = self.create_conf_dict(actual)
+
+ expected = entry["expected_networkd"].splitlines()
+ expected = self.create_conf_dict(expected)
+
+ self.compare_dicts(actual, expected)
+
+ @mock.patch("cloudinit.net.util.chownbyname", return_value=True)
+ def testsimple_render_v4_and_v6_static(self, m_chown):
+ nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network"
+ entry = NETWORK_CONFIGS["v4_and_v6_static"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
+
+ actual = files[nwk_fn].splitlines()
+ actual = self.create_conf_dict(actual)
+
+ expected = entry["expected_networkd"].splitlines()
+ expected = self.create_conf_dict(expected)
+
+ self.compare_dicts(actual, expected)
+
+ @mock.patch("cloudinit.net.util.chownbyname", return_value=True)
+ def testsimple_render_dhcpv6_only(self, m_chown):
+ nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network"
+ entry = NETWORK_CONFIGS["dhcpv6_only"]
+ files = self._render_and_read(network_config=yaml.load(entry["yaml"]))
+
+ actual = files[nwk_fn].splitlines()
+ actual = self.create_conf_dict(actual)
+
+ expected = entry["expected_networkd"].splitlines()
+ expected = self.create_conf_dict(expected)
+
+ self.compare_dicts(actual, expected)
+
+ @mock.patch("cloudinit.net.util.chownbyname", return_value=True)
+ def test_dhcpv6_accept_ra_config_v1(self, m_chown):
+ nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network"
+ entry = NETWORK_CONFIGS["dhcpv6_accept_ra"]
+ files = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v1"])
+ )
+
+ actual = files[nwk_fn].splitlines()
+ actual = self.create_conf_dict(actual)
+ expected = entry["expected_networkd"].splitlines()
+ expected = self.create_conf_dict(expected)
+
+ self.compare_dicts(actual, expected)
+
+ @mock.patch("cloudinit.net.util.chownbyname", return_value=True)
+ def test_dhcpv6_accept_ra_config_v2(self, m_chown):
+ nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network"
+ entry = NETWORK_CONFIGS["dhcpv6_accept_ra"]
+ files = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v2"])
+ )
+
+ actual = files[nwk_fn].splitlines()
+ actual = self.create_conf_dict(actual)
+
+ expected = entry["expected_networkd"].splitlines()
+ expected = self.create_conf_dict(expected)
+
+ self.compare_dicts(actual, expected)
+
+ @mock.patch("cloudinit.net.util.chownbyname", return_value=True)
+ def test_dhcpv6_reject_ra_config_v1(self, m_chown):
+ nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network"
+ entry = NETWORK_CONFIGS["dhcpv6_reject_ra"]
+ files = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v1"])
+ )
+
+ actual = files[nwk_fn].splitlines()
+ actual = self.create_conf_dict(actual)
+
+ expected = entry["expected_networkd"].splitlines()
+ expected = self.create_conf_dict(expected)
+
+ self.compare_dicts(actual, expected)
+
+ @mock.patch("cloudinit.net.util.chownbyname", return_value=True)
+ def test_dhcpv6_reject_ra_config_v2(self, m_chown):
+ nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network"
+ entry = NETWORK_CONFIGS["dhcpv6_reject_ra"]
+ files = self._render_and_read(
+ network_config=yaml.load(entry["yaml_v2"])
+ )
+
+ actual = files[nwk_fn].splitlines()
+ actual = self.create_conf_dict(actual)
+
+ expected = entry["expected_networkd"].splitlines()
+ expected = self.create_conf_dict(expected)
+
+ self.compare_dicts(actual, expected)
+
+
+class TestRenderersSelect:
@pytest.mark.parametrize(
- 'renderer_selected,netplan,eni,nm,scfg,sys', (
+ "renderer_selected,netplan,eni,nm,scfg,sys,networkd",
+ (
# -netplan -ifupdown -nm -scfg -sys raises error
- (net.RendererNotFoundError, False, False, False, False, False),
+ (
+ net.RendererNotFoundError,
+ False,
+ False,
+ False,
+ False,
+ False,
+ False,
+ ),
# -netplan +ifupdown -nm -scfg -sys selects eni
- ('eni', False, True, False, False, False),
+ ("eni", False, True, False, False, False, False),
# +netplan +ifupdown -nm -scfg -sys selects eni
- ('eni', True, True, False, False, False),
+ ("eni", True, True, False, False, False, False),
# +netplan -ifupdown -nm -scfg -sys selects netplan
- ('netplan', True, False, False, False, False),
+ ("netplan", True, False, False, False, False, False),
# Ubuntu with Network-Manager installed
# +netplan -ifupdown +nm -scfg -sys selects netplan
- ('netplan', True, False, True, False, False),
+ ("netplan", True, False, True, False, False, False),
# Centos/OpenSuse with Network-Manager installed selects sysconfig
             # -netplan -ifupdown +nm -scfg +sys selects sysconfig
- ('sysconfig', False, False, True, False, True),
+ ("sysconfig", False, False, True, False, True, False),
+ # -netplan -ifupdown -nm -scfg -sys +networkd selects networkd
+ ("networkd", False, False, False, False, False, True),
),
)
+ @mock.patch("cloudinit.net.renderers.networkd.available")
@mock.patch("cloudinit.net.renderers.netplan.available")
@mock.patch("cloudinit.net.renderers.sysconfig.available")
@mock.patch("cloudinit.net.renderers.sysconfig.available_sysconfig")
@mock.patch("cloudinit.net.renderers.sysconfig.available_nm")
@mock.patch("cloudinit.net.renderers.eni.available")
def test_valid_renderer_from_defaults_depending_on_availability(
- self, m_eni_avail, m_nm_avail, m_scfg_avail, m_sys_avail,
- m_netplan_avail, renderer_selected, netplan, eni, nm, scfg, sys
+ self,
+ m_eni_avail,
+ m_nm_avail,
+ m_scfg_avail,
+ m_sys_avail,
+ m_netplan_avail,
+ m_networkd_avail,
+ renderer_selected,
+ netplan,
+ eni,
+ nm,
+ scfg,
+ sys,
+ networkd,
):
"""Assert proper renderer per DEFAULT_PRIORITY given availability."""
- m_eni_avail.return_value = eni # ifupdown pkg presence
- m_nm_avail.return_value = nm # network-manager presence
- m_scfg_avail.return_value = scfg # sysconfig presence
- m_sys_avail.return_value = sys # sysconfig/ifup/down presence
+ m_eni_avail.return_value = eni # ifupdown pkg presence
+ m_nm_avail.return_value = nm # network-manager presence
+ m_scfg_avail.return_value = scfg # sysconfig presence
+ m_sys_avail.return_value = sys # sysconfig/ifup/down presence
m_netplan_avail.return_value = netplan # netplan presence
+ m_networkd_avail.return_value = networkd # networkd presence
if isinstance(renderer_selected, str):
(renderer_name, _rnd_class) = renderers.select(
priority=renderers.DEFAULT_PRIORITY
@@ -4971,14 +6237,14 @@ class TestNetRenderers(CiTestCase):
def test_eni_and_sysconfig_available(self, m_eni_avail, m_sysc_avail):
m_eni_avail.return_value = True
m_sysc_avail.return_value = True
- found = renderers.search(priority=['sysconfig', 'eni'], first=False)
+ found = renderers.search(priority=["sysconfig", "eni"], first=False)
names = [f[0] for f in found]
- self.assertEqual(['sysconfig', 'eni'], names)
+ self.assertEqual(["sysconfig", "eni"], names)
@mock.patch("cloudinit.net.renderers.eni.available")
def test_search_returns_empty_on_none(self, m_eni_avail):
m_eni_avail.return_value = False
- found = renderers.search(priority=['eni'], first=False)
+ found = renderers.search(priority=["eni"], first=False)
self.assertEqual([], found)
@mock.patch("cloudinit.net.renderers.sysconfig.available")
@@ -4987,16 +6253,16 @@ class TestNetRenderers(CiTestCase):
# available should only be called until one is found.
m_eni_avail.return_value = True
m_sysc_avail.side_effect = Exception("Should not call me")
- found = renderers.search(priority=['eni', 'sysconfig'], first=True)
- self.assertEqual(['eni'], [found[0]])
+ found = renderers.search(priority=["eni", "sysconfig"], first=True)[0]
+ self.assertEqual(["eni"], [found[0]])
@mock.patch("cloudinit.net.renderers.sysconfig.available")
@mock.patch("cloudinit.net.renderers.eni.available")
def test_select_positive(self, m_eni_avail, m_sysc_avail):
m_eni_avail.return_value = True
m_sysc_avail.return_value = False
- found = renderers.select(priority=['sysconfig', 'eni'])
- self.assertEqual('eni', found[0])
+ found = renderers.select(priority=["sysconfig", "eni"])
+ self.assertEqual("eni", found[0])
@mock.patch("cloudinit.net.renderers.sysconfig.available")
@mock.patch("cloudinit.net.renderers.eni.available")
@@ -5005,89 +6271,120 @@ class TestNetRenderers(CiTestCase):
m_eni_avail.return_value = False
m_sysc_avail.return_value = False
- self.assertRaises(net.RendererNotFoundError, renderers.select,
- priority=['sysconfig', 'eni'])
+ self.assertRaises(
+ net.RendererNotFoundError,
+ renderers.select,
+ priority=["sysconfig", "eni"],
+ )
@mock.patch("cloudinit.net.sysconfig.available_sysconfig")
- @mock.patch("cloudinit.util.get_linux_distro")
- def test_sysconfig_available_uses_variant_mapping(self, m_distro, m_avail):
+ @mock.patch("cloudinit.util.system_info")
+ def test_sysconfig_available_uses_variant_mapping(self, m_info, m_avail):
m_avail.return_value = True
- distro_values = [
- ('opensuse', '', ''),
- ('opensuse-leap', '', ''),
- ('opensuse-tumbleweed', '', ''),
- ('sles', '', ''),
- ('centos', '', ''),
- ('fedora', '', ''),
- ('redhat', '', ''),
+ variants = [
+ "suse",
+ "centos",
+ "eurolinux",
+ "fedora",
+ "rhel",
]
- for (distro_name, distro_version, flavor) in distro_values:
- m_distro.return_value = (distro_name, distro_version, flavor)
+ for distro_name in variants:
+ m_info.return_value = {"variant": distro_name}
if hasattr(util.system_info, "cache_clear"):
util.system_info.cache_clear()
result = sysconfig.available()
self.assertTrue(result)
+ @mock.patch("cloudinit.net.renderers.networkd.available")
+ def test_networkd_available(self, m_nwkd_avail):
+ m_nwkd_avail.return_value = True
+ found = renderers.search(priority=["networkd"], first=False)
+ self.assertEqual("networkd", found[0][0])
+
+@mock.patch(
+ "cloudinit.net.is_openvswitch_internal_interface",
+ mock.Mock(return_value=False),
+)
class TestGetInterfaces(CiTestCase):
- _data = {'bonds': ['bond1'],
- 'bridges': ['bridge1'],
- 'vlans': ['bond1.101'],
- 'own_macs': ['enp0s1', 'enp0s2', 'bridge1-nic', 'bridge1',
- 'bond1.101', 'lo', 'eth1'],
- 'macs': {'enp0s1': 'aa:aa:aa:aa:aa:01',
- 'enp0s2': 'aa:aa:aa:aa:aa:02',
- 'bond1': 'aa:aa:aa:aa:aa:01',
- 'bond1.101': 'aa:aa:aa:aa:aa:01',
- 'bridge1': 'aa:aa:aa:aa:aa:03',
- 'bridge1-nic': 'aa:aa:aa:aa:aa:03',
- 'lo': '00:00:00:00:00:00',
- 'greptap0': '00:00:00:00:00:00',
- 'eth1': 'aa:aa:aa:aa:aa:01',
- 'tun0': None},
- 'drivers': {'enp0s1': 'virtio_net',
- 'enp0s2': 'e1000',
- 'bond1': None,
- 'bond1.101': None,
- 'bridge1': None,
- 'bridge1-nic': None,
- 'lo': None,
- 'greptap0': None,
- 'eth1': 'mlx4_core',
- 'tun0': None}}
+ _data = {
+ "bonds": ["bond1"],
+ "bridges": ["bridge1"],
+ "vlans": ["bond1.101"],
+ "own_macs": [
+ "enp0s1",
+ "enp0s2",
+ "bridge1-nic",
+ "bridge1",
+ "bond1.101",
+ "lo",
+ "eth1",
+ ],
+ "macs": {
+ "enp0s1": "aa:aa:aa:aa:aa:01",
+ "enp0s2": "aa:aa:aa:aa:aa:02",
+ "bond1": "aa:aa:aa:aa:aa:01",
+ "bond1.101": "aa:aa:aa:aa:aa:01",
+ "bridge1": "aa:aa:aa:aa:aa:03",
+ "bridge1-nic": "aa:aa:aa:aa:aa:03",
+ "lo": "00:00:00:00:00:00",
+ "greptap0": "00:00:00:00:00:00",
+ "eth1": "aa:aa:aa:aa:aa:01",
+ "tun0": None,
+ },
+ "drivers": {
+ "enp0s1": "virtio_net",
+ "enp0s2": "e1000",
+ "bond1": None,
+ "bond1.101": None,
+ "bridge1": None,
+ "bridge1-nic": None,
+ "lo": None,
+ "greptap0": None,
+ "eth1": "mlx4_core",
+ "tun0": None,
+ },
+ }
data = {}
def _se_get_devicelist(self):
- return list(self.data['devices'])
+ return list(self.data["devices"])
def _se_device_driver(self, name):
- return self.data['drivers'][name]
+ return self.data["drivers"][name]
def _se_device_devid(self, name):
- return '0x%s' % sorted(list(self.data['drivers'].keys())).index(name)
+ return "0x%s" % sorted(list(self.data["drivers"].keys())).index(name)
def _se_get_interface_mac(self, name):
- return self.data['macs'][name]
+ return self.data["macs"][name]
def _se_is_bridge(self, name):
- return name in self.data['bridges']
+ return name in self.data["bridges"]
def _se_is_vlan(self, name):
- return name in self.data['vlans']
+ return name in self.data["vlans"]
def _se_interface_has_own_mac(self, name):
- return name in self.data['own_macs']
+ return name in self.data["own_macs"]
def _mock_setup(self):
self.data = copy.deepcopy(self._data)
- self.data['devices'] = set(list(self.data['macs'].keys()))
- mocks = ('get_devicelist', 'get_interface_mac', 'is_bridge',
- 'interface_has_own_mac', 'is_vlan', 'device_driver',
- 'device_devid')
+ self.data["devices"] = set(list(self.data["macs"].keys()))
+ mocks = (
+ "get_devicelist",
+ "get_interface_mac",
+ "is_bridge",
+ "interface_has_own_mac",
+ "is_vlan",
+ "device_driver",
+ "device_devid",
+ )
self.mocks = {}
for n in mocks:
- m = mock.patch('cloudinit.net.' + n,
- side_effect=getattr(self, '_se_' + n))
+ m = mock.patch(
+ "cloudinit.net." + n, side_effect=getattr(self, "_se_" + n)
+ )
self.addCleanup(m.stop)
self.mocks[n] = m.start()
@@ -5095,30 +6392,31 @@ class TestGetInterfaces(CiTestCase):
self._mock_setup()
ret = net.get_interfaces()
- self.assertIn('enp0s1', self._se_get_devicelist())
- self.assertIn('eth1', self._se_get_devicelist())
- found = [ent for ent in ret if 'aa:aa:aa:aa:aa:01' in ent]
+ self.assertIn("enp0s1", self._se_get_devicelist())
+ self.assertIn("eth1", self._se_get_devicelist())
+ found = [ent for ent in ret if "aa:aa:aa:aa:aa:01" in ent]
self.assertEqual(len(found), 2)
def test_gi_excludes_any_without_mac_address(self):
self._mock_setup()
ret = net.get_interfaces()
- self.assertIn('tun0', self._se_get_devicelist())
- found = [ent for ent in ret if 'tun0' in ent]
+ self.assertIn("tun0", self._se_get_devicelist())
+ found = [ent for ent in ret if "tun0" in ent]
self.assertEqual(len(found), 0)
def test_gi_excludes_stolen_macs(self):
self._mock_setup()
ret = net.get_interfaces()
- self.mocks['interface_has_own_mac'].assert_has_calls(
- [mock.call('enp0s1'), mock.call('bond1')], any_order=True)
+ self.mocks["interface_has_own_mac"].assert_has_calls(
+ [mock.call("enp0s1"), mock.call("bond1")], any_order=True
+ )
expected = [
- ('enp0s2', 'aa:aa:aa:aa:aa:02', 'e1000', '0x5'),
- ('enp0s1', 'aa:aa:aa:aa:aa:01', 'virtio_net', '0x4'),
- ('eth1', 'aa:aa:aa:aa:aa:01', 'mlx4_core', '0x6'),
- ('lo', '00:00:00:00:00:00', None, '0x8'),
- ('bridge1-nic', 'aa:aa:aa:aa:aa:03', None, '0x3'),
+ ("enp0s2", "aa:aa:aa:aa:aa:02", "e1000", "0x5"),
+ ("enp0s1", "aa:aa:aa:aa:aa:01", "virtio_net", "0x4"),
+ ("eth1", "aa:aa:aa:aa:aa:01", "mlx4_core", "0x6"),
+ ("lo", "00:00:00:00:00:00", None, "0x8"),
+ ("bridge1-nic", "aa:aa:aa:aa:aa:03", None, "0x3"),
]
self.assertEqual(sorted(expected), sorted(ret))
@@ -5127,24 +6425,29 @@ class TestGetInterfaces(CiTestCase):
# add a device 'b1', make all return they have their "own mac",
# set everything other than 'b1' to be a bridge.
# then expect b1 is the only thing left.
- self.data['macs']['b1'] = 'aa:aa:aa:aa:aa:b1'
- self.data['drivers']['b1'] = None
- self.data['devices'].add('b1')
- self.data['bonds'] = []
- self.data['own_macs'] = self.data['devices']
- self.data['bridges'] = [f for f in self.data['devices'] if f != "b1"]
+ self.data["macs"]["b1"] = "aa:aa:aa:aa:aa:b1"
+ self.data["drivers"]["b1"] = None
+ self.data["devices"].add("b1")
+ self.data["bonds"] = []
+ self.data["own_macs"] = self.data["devices"]
+ self.data["bridges"] = [f for f in self.data["devices"] if f != "b1"]
ret = net.get_interfaces()
- self.assertEqual([('b1', 'aa:aa:aa:aa:aa:b1', None, '0x0')], ret)
- self.mocks['is_bridge'].assert_has_calls(
- [mock.call('bridge1'), mock.call('enp0s1'), mock.call('bond1'),
- mock.call('b1')],
- any_order=True)
+ self.assertEqual([("b1", "aa:aa:aa:aa:aa:b1", None, "0x0")], ret)
+ self.mocks["is_bridge"].assert_has_calls(
+ [
+ mock.call("bridge1"),
+ mock.call("enp0s1"),
+ mock.call("bond1"),
+ mock.call("b1"),
+ ],
+ any_order=True,
+ )
class TestInterfaceHasOwnMac(CiTestCase):
"""Test interface_has_own_mac. This is admittedly a bit whitebox."""
- @mock.patch('cloudinit.net.read_sys_net_int', return_value=None)
+ @mock.patch("cloudinit.net.read_sys_net_int", return_value=None)
def test_non_strict_with_no_addr_assign_type(self, m_read_sys_net_int):
"""If nic does not have addr_assign_type, it is not "stolen".
@@ -5161,229 +6464,301 @@ class TestInterfaceHasOwnMac(CiTestCase):
"""
self.assertTrue(interface_has_own_mac("eth0"))
- @mock.patch('cloudinit.net.read_sys_net_int', return_value=None)
+ @mock.patch("cloudinit.net.read_sys_net_int", return_value=None)
def test_strict_with_no_addr_assign_type_raises(self, m_read_sys_net_int):
with self.assertRaises(ValueError):
interface_has_own_mac("eth0", True)
- @mock.patch('cloudinit.net.read_sys_net_int')
+ @mock.patch("cloudinit.net.read_sys_net_int")
def test_expected_values(self, m_read_sys_net_int):
msg = "address_assign_type=%d said to not have own mac"
for address_assign_type in (0, 1, 3):
m_read_sys_net_int.return_value = address_assign_type
self.assertTrue(
- interface_has_own_mac("eth0", msg % address_assign_type))
+ interface_has_own_mac("eth0", msg % address_assign_type)
+ )
m_read_sys_net_int.return_value = 2
self.assertFalse(interface_has_own_mac("eth0"))
+@mock.patch(
+ "cloudinit.net.is_openvswitch_internal_interface",
+ mock.Mock(return_value=False),
+)
class TestGetInterfacesByMac(CiTestCase):
- _data = {'bonds': ['bond1'],
- 'bridges': ['bridge1'],
- 'vlans': ['bond1.101'],
- 'own_macs': ['enp0s1', 'enp0s2', 'bridge1-nic', 'bridge1',
- 'bond1.101', 'lo'],
- 'macs': {'enp0s1': 'aa:aa:aa:aa:aa:01',
- 'enp0s2': 'aa:aa:aa:aa:aa:02',
- 'bond1': 'aa:aa:aa:aa:aa:01',
- 'bond1.101': 'aa:aa:aa:aa:aa:01',
- 'bridge1': 'aa:aa:aa:aa:aa:03',
- 'bridge1-nic': 'aa:aa:aa:aa:aa:03',
- 'lo': '00:00:00:00:00:00',
- 'greptap0': '00:00:00:00:00:00',
- 'tun0': None}}
+ _data = {
+ "bonds": ["bond1"],
+ "bridges": ["bridge1"],
+ "vlans": ["bond1.101"],
+ "own_macs": [
+ "enp0s1",
+ "enp0s2",
+ "bridge1-nic",
+ "bridge1",
+ "bond1.101",
+ "lo",
+ ],
+ "macs": {
+ "enp0s1": "aa:aa:aa:aa:aa:01",
+ "enp0s2": "aa:aa:aa:aa:aa:02",
+ "bond1": "aa:aa:aa:aa:aa:01",
+ "bond1.101": "aa:aa:aa:aa:aa:01",
+ "bridge1": "aa:aa:aa:aa:aa:03",
+ "bridge1-nic": "aa:aa:aa:aa:aa:03",
+ "lo": "00:00:00:00:00:00",
+ "greptap0": "00:00:00:00:00:00",
+ "tun0": None,
+ },
+ }
data = {}
def _se_get_devicelist(self):
- return list(self.data['devices'])
+ return list(self.data["devices"])
def _se_get_interface_mac(self, name):
- return self.data['macs'][name]
+ return self.data["macs"][name]
def _se_is_bridge(self, name):
- return name in self.data['bridges']
+ return name in self.data["bridges"]
def _se_is_vlan(self, name):
- return name in self.data['vlans']
+ return name in self.data["vlans"]
def _se_interface_has_own_mac(self, name):
- return name in self.data['own_macs']
+ return name in self.data["own_macs"]
def _se_get_ib_interface_hwaddr(self, name, ethernet_format):
- ib_hwaddr = self.data.get('ib_hwaddr', {})
+ ib_hwaddr = self.data.get("ib_hwaddr", {})
return ib_hwaddr.get(name, {}).get(ethernet_format)
def _mock_setup(self):
self.data = copy.deepcopy(self._data)
- self.data['devices'] = set(list(self.data['macs'].keys()))
- mocks = ('get_devicelist', 'get_interface_mac', 'is_bridge',
- 'interface_has_own_mac', 'is_vlan', 'get_ib_interface_hwaddr')
+ self.data["devices"] = set(list(self.data["macs"].keys()))
+ mocks = (
+ "get_devicelist",
+ "get_interface_mac",
+ "is_bridge",
+ "interface_has_own_mac",
+ "is_vlan",
+ "get_ib_interface_hwaddr",
+ )
self.mocks = {}
for n in mocks:
- m = mock.patch('cloudinit.net.' + n,
- side_effect=getattr(self, '_se_' + n))
+ m = mock.patch(
+ "cloudinit.net." + n, side_effect=getattr(self, "_se_" + n)
+ )
self.addCleanup(m.stop)
self.mocks[n] = m.start()
def test_raise_exception_on_duplicate_macs(self):
self._mock_setup()
- self.data['macs']['bridge1-nic'] = self.data['macs']['enp0s1']
+ self.data["macs"]["bridge1-nic"] = self.data["macs"]["enp0s1"]
self.assertRaises(RuntimeError, net.get_interfaces_by_mac)
def test_excludes_any_without_mac_address(self):
self._mock_setup()
ret = net.get_interfaces_by_mac()
- self.assertIn('tun0', self._se_get_devicelist())
- self.assertNotIn('tun0', ret.values())
+ self.assertIn("tun0", self._se_get_devicelist())
+ self.assertNotIn("tun0", ret.values())
def test_excludes_stolen_macs(self):
self._mock_setup()
ret = net.get_interfaces_by_mac()
- self.mocks['interface_has_own_mac'].assert_has_calls(
- [mock.call('enp0s1'), mock.call('bond1')], any_order=True)
+ self.mocks["interface_has_own_mac"].assert_has_calls(
+ [mock.call("enp0s1"), mock.call("bond1")], any_order=True
+ )
self.assertEqual(
- {'aa:aa:aa:aa:aa:01': 'enp0s1', 'aa:aa:aa:aa:aa:02': 'enp0s2',
- 'aa:aa:aa:aa:aa:03': 'bridge1-nic', '00:00:00:00:00:00': 'lo'},
- ret)
+ {
+ "aa:aa:aa:aa:aa:01": "enp0s1",
+ "aa:aa:aa:aa:aa:02": "enp0s2",
+ "aa:aa:aa:aa:aa:03": "bridge1-nic",
+ "00:00:00:00:00:00": "lo",
+ },
+ ret,
+ )
def test_excludes_bridges(self):
self._mock_setup()
# add a device 'b1', make all return they have their "own mac",
# set everything other than 'b1' to be a bridge.
# then expect b1 is the only thing left.
- self.data['macs']['b1'] = 'aa:aa:aa:aa:aa:b1'
- self.data['devices'].add('b1')
- self.data['bonds'] = []
- self.data['own_macs'] = self.data['devices']
- self.data['bridges'] = [f for f in self.data['devices'] if f != "b1"]
+ self.data["macs"]["b1"] = "aa:aa:aa:aa:aa:b1"
+ self.data["devices"].add("b1")
+ self.data["bonds"] = []
+ self.data["own_macs"] = self.data["devices"]
+ self.data["bridges"] = [f for f in self.data["devices"] if f != "b1"]
ret = net.get_interfaces_by_mac()
- self.assertEqual({'aa:aa:aa:aa:aa:b1': 'b1'}, ret)
- self.mocks['is_bridge'].assert_has_calls(
- [mock.call('bridge1'), mock.call('enp0s1'), mock.call('bond1'),
- mock.call('b1')],
- any_order=True)
+ self.assertEqual({"aa:aa:aa:aa:aa:b1": "b1"}, ret)
+ self.mocks["is_bridge"].assert_has_calls(
+ [
+ mock.call("bridge1"),
+ mock.call("enp0s1"),
+ mock.call("bond1"),
+ mock.call("b1"),
+ ],
+ any_order=True,
+ )
def test_excludes_vlans(self):
self._mock_setup()
# add a device 'b1', make all return they have their "own mac",
# set everything other than 'b1' to be a vlan.
# then expect b1 is the only thing left.
- self.data['macs']['b1'] = 'aa:aa:aa:aa:aa:b1'
- self.data['devices'].add('b1')
- self.data['bonds'] = []
- self.data['bridges'] = []
- self.data['own_macs'] = self.data['devices']
- self.data['vlans'] = [f for f in self.data['devices'] if f != "b1"]
+ self.data["macs"]["b1"] = "aa:aa:aa:aa:aa:b1"
+ self.data["devices"].add("b1")
+ self.data["bonds"] = []
+ self.data["bridges"] = []
+ self.data["own_macs"] = self.data["devices"]
+ self.data["vlans"] = [f for f in self.data["devices"] if f != "b1"]
ret = net.get_interfaces_by_mac()
- self.assertEqual({'aa:aa:aa:aa:aa:b1': 'b1'}, ret)
- self.mocks['is_vlan'].assert_has_calls(
- [mock.call('bridge1'), mock.call('enp0s1'), mock.call('bond1'),
- mock.call('b1')],
- any_order=True)
+ self.assertEqual({"aa:aa:aa:aa:aa:b1": "b1"}, ret)
+ self.mocks["is_vlan"].assert_has_calls(
+ [
+ mock.call("bridge1"),
+ mock.call("enp0s1"),
+ mock.call("bond1"),
+ mock.call("b1"),
+ ],
+ any_order=True,
+ )
def test_duplicates_of_empty_mac_are_ok(self):
"""Duplicate macs of 00:00:00:00:00:00 should be skipped."""
self._mock_setup()
empty_mac = "00:00:00:00:00:00"
- addnics = ('greptap1', 'lo', 'greptap2')
- self.data['macs'].update(dict((k, empty_mac) for k in addnics))
- self.data['devices'].update(set(addnics))
- self.data['own_macs'].extend(list(addnics))
+ addnics = ("greptap1", "lo", "greptap2")
+ self.data["macs"].update(dict((k, empty_mac) for k in addnics))
+ self.data["devices"].update(set(addnics))
+ self.data["own_macs"].extend(list(addnics))
ret = net.get_interfaces_by_mac()
- self.assertEqual('lo', ret[empty_mac])
+ self.assertEqual("lo", ret[empty_mac])
def test_skip_all_zeros(self):
"""Any mac of 00:... should be skipped."""
self._mock_setup()
emac1, emac2, emac4, emac6 = (
- '00', '00:00', '00:00:00:00', '00:00:00:00:00:00')
- addnics = {'empty1': emac1, 'emac2a': emac2, 'emac2b': emac2,
- 'emac4': emac4, 'emac6': emac6}
- self.data['macs'].update(addnics)
- self.data['devices'].update(set(addnics))
- self.data['own_macs'].extend(addnics.keys())
+ "00",
+ "00:00",
+ "00:00:00:00",
+ "00:00:00:00:00:00",
+ )
+ addnics = {
+ "empty1": emac1,
+ "emac2a": emac2,
+ "emac2b": emac2,
+ "emac4": emac4,
+ "emac6": emac6,
+ }
+ self.data["macs"].update(addnics)
+ self.data["devices"].update(set(addnics))
+ self.data["own_macs"].extend(addnics.keys())
ret = net.get_interfaces_by_mac()
- self.assertEqual('lo', ret['00:00:00:00:00:00'])
+ self.assertEqual("lo", ret["00:00:00:00:00:00"])
def test_ib(self):
- ib_addr = '80:00:00:28:fe:80:00:00:00:00:00:00:00:11:22:03:00:33:44:56'
- ib_addr_eth_format = '00:11:22:33:44:56'
+ ib_addr = "80:00:00:28:fe:80:00:00:00:00:00:00:00:11:22:03:00:33:44:56"
+ ib_addr_eth_format = "00:11:22:33:44:56"
self._mock_setup()
- self.data['devices'] = ['enp0s1', 'ib0']
- self.data['own_macs'].append('ib0')
- self.data['macs']['ib0'] = ib_addr
- self.data['ib_hwaddr'] = {'ib0': {True: ib_addr_eth_format,
- False: ib_addr}}
+ self.data["devices"] = ["enp0s1", "ib0"]
+ self.data["own_macs"].append("ib0")
+ self.data["macs"]["ib0"] = ib_addr
+ self.data["ib_hwaddr"] = {
+ "ib0": {True: ib_addr_eth_format, False: ib_addr}
+ }
result = net.get_interfaces_by_mac()
- expected = {'aa:aa:aa:aa:aa:01': 'enp0s1',
- ib_addr_eth_format: 'ib0', ib_addr: 'ib0'}
+ expected = {
+ "aa:aa:aa:aa:aa:01": "enp0s1",
+ ib_addr_eth_format: "ib0",
+ ib_addr: "ib0",
+ }
self.assertEqual(expected, result)
class TestInterfacesSorting(CiTestCase):
-
def test_natural_order(self):
- data = ['ens5', 'ens6', 'ens3', 'ens20', 'ens13', 'ens2']
+ data = ["ens5", "ens6", "ens3", "ens20", "ens13", "ens2"]
self.assertEqual(
sorted(data, key=natural_sort_key),
- ['ens2', 'ens3', 'ens5', 'ens6', 'ens13', 'ens20'])
- data2 = ['enp2s0', 'enp2s3', 'enp0s3', 'enp0s13', 'enp0s8', 'enp1s2']
+ ["ens2", "ens3", "ens5", "ens6", "ens13", "ens20"],
+ )
+ data2 = ["enp2s0", "enp2s3", "enp0s3", "enp0s13", "enp0s8", "enp1s2"]
self.assertEqual(
sorted(data2, key=natural_sort_key),
- ['enp0s3', 'enp0s8', 'enp0s13', 'enp1s2', 'enp2s0', 'enp2s3'])
+ ["enp0s3", "enp0s8", "enp0s13", "enp1s2", "enp2s0", "enp2s3"],
+ )
+@mock.patch(
+ "cloudinit.net.is_openvswitch_internal_interface",
+ mock.Mock(return_value=False),
+)
class TestGetIBHwaddrsByInterface(CiTestCase):
- _ib_addr = '80:00:00:28:fe:80:00:00:00:00:00:00:00:11:22:03:00:33:44:56'
- _ib_addr_eth_format = '00:11:22:33:44:56'
- _data = {'devices': ['enp0s1', 'enp0s2', 'bond1', 'bridge1',
- 'bridge1-nic', 'tun0', 'ib0'],
- 'bonds': ['bond1'],
- 'bridges': ['bridge1'],
- 'own_macs': ['enp0s1', 'enp0s2', 'bridge1-nic', 'bridge1', 'ib0'],
- 'macs': {'enp0s1': 'aa:aa:aa:aa:aa:01',
- 'enp0s2': 'aa:aa:aa:aa:aa:02',
- 'bond1': 'aa:aa:aa:aa:aa:01',
- 'bridge1': 'aa:aa:aa:aa:aa:03',
- 'bridge1-nic': 'aa:aa:aa:aa:aa:03',
- 'tun0': None,
- 'ib0': _ib_addr},
- 'ib_hwaddr': {'ib0': {True: _ib_addr_eth_format,
- False: _ib_addr}}}
+ _ib_addr = "80:00:00:28:fe:80:00:00:00:00:00:00:00:11:22:03:00:33:44:56"
+ _ib_addr_eth_format = "00:11:22:33:44:56"
+ _data = {
+ "devices": [
+ "enp0s1",
+ "enp0s2",
+ "bond1",
+ "bridge1",
+ "bridge1-nic",
+ "tun0",
+ "ib0",
+ ],
+ "bonds": ["bond1"],
+ "bridges": ["bridge1"],
+ "own_macs": ["enp0s1", "enp0s2", "bridge1-nic", "bridge1", "ib0"],
+ "macs": {
+ "enp0s1": "aa:aa:aa:aa:aa:01",
+ "enp0s2": "aa:aa:aa:aa:aa:02",
+ "bond1": "aa:aa:aa:aa:aa:01",
+ "bridge1": "aa:aa:aa:aa:aa:03",
+ "bridge1-nic": "aa:aa:aa:aa:aa:03",
+ "tun0": None,
+ "ib0": _ib_addr,
+ },
+ "ib_hwaddr": {"ib0": {True: _ib_addr_eth_format, False: _ib_addr}},
+ }
data = {}
def _mock_setup(self):
self.data = copy.deepcopy(self._data)
- mocks = ('get_devicelist', 'get_interface_mac', 'is_bridge',
- 'interface_has_own_mac', 'get_ib_interface_hwaddr')
+ mocks = (
+ "get_devicelist",
+ "get_interface_mac",
+ "is_bridge",
+ "interface_has_own_mac",
+ "get_ib_interface_hwaddr",
+ )
self.mocks = {}
for n in mocks:
- m = mock.patch('cloudinit.net.' + n,
- side_effect=getattr(self, '_se_' + n))
+ m = mock.patch(
+ "cloudinit.net." + n, side_effect=getattr(self, "_se_" + n)
+ )
self.addCleanup(m.stop)
self.mocks[n] = m.start()
def _se_get_devicelist(self):
- return self.data['devices']
+ return self.data["devices"]
def _se_get_interface_mac(self, name):
- return self.data['macs'][name]
+ return self.data["macs"][name]
def _se_is_bridge(self, name):
- return name in self.data['bridges']
+ return name in self.data["bridges"]
def _se_interface_has_own_mac(self, name):
- return name in self.data['own_macs']
+ return name in self.data["own_macs"]
def _se_get_ib_interface_hwaddr(self, name, ethernet_format):
- ib_hwaddr = self.data.get('ib_hwaddr', {})
+ ib_hwaddr = self.data.get("ib_hwaddr", {})
return ib_hwaddr.get(name, {}).get(ethernet_format)
def test_ethernet(self):
self._mock_setup()
- self.data['devices'].remove('ib0')
+ self.data["devices"].remove("ib0")
result = net.get_ib_hwaddrs_by_interface()
expected = {}
self.assertEqual(expected, result)
@@ -5391,7 +6766,7 @@ class TestGetIBHwaddrsByInterface(CiTestCase):
def test_ib(self):
self._mock_setup()
result = net.get_ib_hwaddrs_by_interface()
- expected = {'ib0': self._ib_addr}
+ expected = {"ib0": self._ib_addr}
self.assertEqual(expected, result)
@@ -5404,239 +6779,305 @@ def _gzip_data(data):
class TestRenameInterfaces(CiTestCase):
-
- @mock.patch('cloudinit.subp.subp')
+ @mock.patch("cloudinit.subp.subp")
def test_rename_all(self, mock_subp):
renames = [
- ('00:11:22:33:44:55', 'interface0', 'virtio_net', '0x3'),
- ('00:11:22:33:44:aa', 'interface2', 'virtio_net', '0x5'),
+ ("00:11:22:33:44:55", "interface0", "virtio_net", "0x3"),
+ ("00:11:22:33:44:aa", "interface2", "virtio_net", "0x5"),
]
current_info = {
- 'ens3': {
- 'downable': True,
- 'device_id': '0x3',
- 'driver': 'virtio_net',
- 'mac': '00:11:22:33:44:55',
- 'name': 'ens3',
- 'up': False},
- 'ens5': {
- 'downable': True,
- 'device_id': '0x5',
- 'driver': 'virtio_net',
- 'mac': '00:11:22:33:44:aa',
- 'name': 'ens5',
- 'up': False},
+ "ens3": {
+ "downable": True,
+ "device_id": "0x3",
+ "driver": "virtio_net",
+ "mac": "00:11:22:33:44:55",
+ "name": "ens3",
+ "up": False,
+ },
+ "ens5": {
+ "downable": True,
+ "device_id": "0x5",
+ "driver": "virtio_net",
+ "mac": "00:11:22:33:44:aa",
+ "name": "ens5",
+ "up": False,
+ },
}
net._rename_interfaces(renames, current_info=current_info)
print(mock_subp.call_args_list)
- mock_subp.assert_has_calls([
- mock.call(['ip', 'link', 'set', 'ens3', 'name', 'interface0'],
- capture=True),
- mock.call(['ip', 'link', 'set', 'ens5', 'name', 'interface2'],
- capture=True),
- ])
-
- @mock.patch('cloudinit.subp.subp')
+ mock_subp.assert_has_calls(
+ [
+ mock.call(
+ ["ip", "link", "set", "ens3", "name", "interface0"],
+ capture=True,
+ ),
+ mock.call(
+ ["ip", "link", "set", "ens5", "name", "interface2"],
+ capture=True,
+ ),
+ ]
+ )
+
+ @mock.patch("cloudinit.subp.subp")
def test_rename_no_driver_no_device_id(self, mock_subp):
renames = [
- ('00:11:22:33:44:55', 'interface0', None, None),
- ('00:11:22:33:44:aa', 'interface1', None, None),
+ ("00:11:22:33:44:55", "interface0", None, None),
+ ("00:11:22:33:44:aa", "interface1", None, None),
]
current_info = {
- 'eth0': {
- 'downable': True,
- 'device_id': None,
- 'driver': None,
- 'mac': '00:11:22:33:44:55',
- 'name': 'eth0',
- 'up': False},
- 'eth1': {
- 'downable': True,
- 'device_id': None,
- 'driver': None,
- 'mac': '00:11:22:33:44:aa',
- 'name': 'eth1',
- 'up': False},
+ "eth0": {
+ "downable": True,
+ "device_id": None,
+ "driver": None,
+ "mac": "00:11:22:33:44:55",
+ "name": "eth0",
+ "up": False,
+ },
+ "eth1": {
+ "downable": True,
+ "device_id": None,
+ "driver": None,
+ "mac": "00:11:22:33:44:aa",
+ "name": "eth1",
+ "up": False,
+ },
}
net._rename_interfaces(renames, current_info=current_info)
print(mock_subp.call_args_list)
- mock_subp.assert_has_calls([
- mock.call(['ip', 'link', 'set', 'eth0', 'name', 'interface0'],
- capture=True),
- mock.call(['ip', 'link', 'set', 'eth1', 'name', 'interface1'],
- capture=True),
- ])
-
- @mock.patch('cloudinit.subp.subp')
+ mock_subp.assert_has_calls(
+ [
+ mock.call(
+ ["ip", "link", "set", "eth0", "name", "interface0"],
+ capture=True,
+ ),
+ mock.call(
+ ["ip", "link", "set", "eth1", "name", "interface1"],
+ capture=True,
+ ),
+ ]
+ )
+
+ @mock.patch("cloudinit.subp.subp")
def test_rename_all_bounce(self, mock_subp):
renames = [
- ('00:11:22:33:44:55', 'interface0', 'virtio_net', '0x3'),
- ('00:11:22:33:44:aa', 'interface2', 'virtio_net', '0x5'),
+ ("00:11:22:33:44:55", "interface0", "virtio_net", "0x3"),
+ ("00:11:22:33:44:aa", "interface2", "virtio_net", "0x5"),
]
current_info = {
- 'ens3': {
- 'downable': True,
- 'device_id': '0x3',
- 'driver': 'virtio_net',
- 'mac': '00:11:22:33:44:55',
- 'name': 'ens3',
- 'up': True},
- 'ens5': {
- 'downable': True,
- 'device_id': '0x5',
- 'driver': 'virtio_net',
- 'mac': '00:11:22:33:44:aa',
- 'name': 'ens5',
- 'up': True},
+ "ens3": {
+ "downable": True,
+ "device_id": "0x3",
+ "driver": "virtio_net",
+ "mac": "00:11:22:33:44:55",
+ "name": "ens3",
+ "up": True,
+ },
+ "ens5": {
+ "downable": True,
+ "device_id": "0x5",
+ "driver": "virtio_net",
+ "mac": "00:11:22:33:44:aa",
+ "name": "ens5",
+ "up": True,
+ },
}
net._rename_interfaces(renames, current_info=current_info)
print(mock_subp.call_args_list)
- mock_subp.assert_has_calls([
- mock.call(['ip', 'link', 'set', 'ens3', 'down'], capture=True),
- mock.call(['ip', 'link', 'set', 'ens3', 'name', 'interface0'],
- capture=True),
- mock.call(['ip', 'link', 'set', 'ens5', 'down'], capture=True),
- mock.call(['ip', 'link', 'set', 'ens5', 'name', 'interface2'],
- capture=True),
- mock.call(['ip', 'link', 'set', 'interface0', 'up'], capture=True),
- mock.call(['ip', 'link', 'set', 'interface2', 'up'], capture=True)
- ])
-
- @mock.patch('cloudinit.subp.subp')
+ mock_subp.assert_has_calls(
+ [
+ mock.call(["ip", "link", "set", "ens3", "down"], capture=True),
+ mock.call(
+ ["ip", "link", "set", "ens3", "name", "interface0"],
+ capture=True,
+ ),
+ mock.call(["ip", "link", "set", "ens5", "down"], capture=True),
+ mock.call(
+ ["ip", "link", "set", "ens5", "name", "interface2"],
+ capture=True,
+ ),
+ mock.call(
+ ["ip", "link", "set", "interface0", "up"], capture=True
+ ),
+ mock.call(
+ ["ip", "link", "set", "interface2", "up"], capture=True
+ ),
+ ]
+ )
+
+ @mock.patch("cloudinit.subp.subp")
def test_rename_duplicate_macs(self, mock_subp):
renames = [
- ('00:11:22:33:44:55', 'eth0', 'hv_netsvc', '0x3'),
- ('00:11:22:33:44:55', 'vf1', 'mlx4_core', '0x5'),
+ ("00:11:22:33:44:55", "eth0", "hv_netsvc", "0x3"),
+ ("00:11:22:33:44:55", "vf1", "mlx4_core", "0x5"),
]
current_info = {
- 'eth0': {
- 'downable': True,
- 'device_id': '0x3',
- 'driver': 'hv_netsvc',
- 'mac': '00:11:22:33:44:55',
- 'name': 'eth0',
- 'up': False},
- 'eth1': {
- 'downable': True,
- 'device_id': '0x5',
- 'driver': 'mlx4_core',
- 'mac': '00:11:22:33:44:55',
- 'name': 'eth1',
- 'up': False},
+ "eth0": {
+ "downable": True,
+ "device_id": "0x3",
+ "driver": "hv_netsvc",
+ "mac": "00:11:22:33:44:55",
+ "name": "eth0",
+ "up": False,
+ },
+ "eth1": {
+ "downable": True,
+ "device_id": "0x5",
+ "driver": "mlx4_core",
+ "mac": "00:11:22:33:44:55",
+ "name": "eth1",
+ "up": False,
+ },
}
net._rename_interfaces(renames, current_info=current_info)
print(mock_subp.call_args_list)
- mock_subp.assert_has_calls([
- mock.call(['ip', 'link', 'set', 'eth1', 'name', 'vf1'],
- capture=True),
- ])
+ mock_subp.assert_has_calls(
+ [
+ mock.call(
+ ["ip", "link", "set", "eth1", "name", "vf1"], capture=True
+ ),
+ ]
+ )
- @mock.patch('cloudinit.subp.subp')
+ @mock.patch("cloudinit.subp.subp")
def test_rename_duplicate_macs_driver_no_devid(self, mock_subp):
renames = [
- ('00:11:22:33:44:55', 'eth0', 'hv_netsvc', None),
- ('00:11:22:33:44:55', 'vf1', 'mlx4_core', None),
+ ("00:11:22:33:44:55", "eth0", "hv_netsvc", None),
+ ("00:11:22:33:44:55", "vf1", "mlx4_core", None),
]
current_info = {
- 'eth0': {
- 'downable': True,
- 'device_id': '0x3',
- 'driver': 'hv_netsvc',
- 'mac': '00:11:22:33:44:55',
- 'name': 'eth0',
- 'up': False},
- 'eth1': {
- 'downable': True,
- 'device_id': '0x5',
- 'driver': 'mlx4_core',
- 'mac': '00:11:22:33:44:55',
- 'name': 'eth1',
- 'up': False},
+ "eth0": {
+ "downable": True,
+ "device_id": "0x3",
+ "driver": "hv_netsvc",
+ "mac": "00:11:22:33:44:55",
+ "name": "eth0",
+ "up": False,
+ },
+ "eth1": {
+ "downable": True,
+ "device_id": "0x5",
+ "driver": "mlx4_core",
+ "mac": "00:11:22:33:44:55",
+ "name": "eth1",
+ "up": False,
+ },
}
net._rename_interfaces(renames, current_info=current_info)
print(mock_subp.call_args_list)
- mock_subp.assert_has_calls([
- mock.call(['ip', 'link', 'set', 'eth1', 'name', 'vf1'],
- capture=True),
- ])
+ mock_subp.assert_has_calls(
+ [
+ mock.call(
+ ["ip", "link", "set", "eth1", "name", "vf1"], capture=True
+ ),
+ ]
+ )
- @mock.patch('cloudinit.subp.subp')
+ @mock.patch("cloudinit.subp.subp")
def test_rename_multi_mac_dups(self, mock_subp):
renames = [
- ('00:11:22:33:44:55', 'eth0', 'hv_netsvc', '0x3'),
- ('00:11:22:33:44:55', 'vf1', 'mlx4_core', '0x5'),
- ('00:11:22:33:44:55', 'vf2', 'mlx4_core', '0x7'),
+ ("00:11:22:33:44:55", "eth0", "hv_netsvc", "0x3"),
+ ("00:11:22:33:44:55", "vf1", "mlx4_core", "0x5"),
+ ("00:11:22:33:44:55", "vf2", "mlx4_core", "0x7"),
]
current_info = {
- 'eth0': {
- 'downable': True,
- 'device_id': '0x3',
- 'driver': 'hv_netsvc',
- 'mac': '00:11:22:33:44:55',
- 'name': 'eth0',
- 'up': False},
- 'eth1': {
- 'downable': True,
- 'device_id': '0x5',
- 'driver': 'mlx4_core',
- 'mac': '00:11:22:33:44:55',
- 'name': 'eth1',
- 'up': False},
- 'eth2': {
- 'downable': True,
- 'device_id': '0x7',
- 'driver': 'mlx4_core',
- 'mac': '00:11:22:33:44:55',
- 'name': 'eth2',
- 'up': False},
+ "eth0": {
+ "downable": True,
+ "device_id": "0x3",
+ "driver": "hv_netsvc",
+ "mac": "00:11:22:33:44:55",
+ "name": "eth0",
+ "up": False,
+ },
+ "eth1": {
+ "downable": True,
+ "device_id": "0x5",
+ "driver": "mlx4_core",
+ "mac": "00:11:22:33:44:55",
+ "name": "eth1",
+ "up": False,
+ },
+ "eth2": {
+ "downable": True,
+ "device_id": "0x7",
+ "driver": "mlx4_core",
+ "mac": "00:11:22:33:44:55",
+ "name": "eth2",
+ "up": False,
+ },
}
net._rename_interfaces(renames, current_info=current_info)
print(mock_subp.call_args_list)
- mock_subp.assert_has_calls([
- mock.call(['ip', 'link', 'set', 'eth1', 'name', 'vf1'],
- capture=True),
- mock.call(['ip', 'link', 'set', 'eth2', 'name', 'vf2'],
- capture=True),
- ])
-
- @mock.patch('cloudinit.subp.subp')
+ mock_subp.assert_has_calls(
+ [
+ mock.call(
+ ["ip", "link", "set", "eth1", "name", "vf1"], capture=True
+ ),
+ mock.call(
+ ["ip", "link", "set", "eth2", "name", "vf2"], capture=True
+ ),
+ ]
+ )
+
+ @mock.patch("cloudinit.subp.subp")
def test_rename_macs_case_insensitive(self, mock_subp):
"""_rename_interfaces must support upper or lower case macs."""
renames = [
- ('aa:aa:aa:aa:aa:aa', 'en0', None, None),
- ('BB:BB:BB:BB:BB:BB', 'en1', None, None),
- ('cc:cc:cc:cc:cc:cc', 'en2', None, None),
- ('DD:DD:DD:DD:DD:DD', 'en3', None, None),
+ ("aa:aa:aa:aa:aa:aa", "en0", None, None),
+ ("BB:BB:BB:BB:BB:BB", "en1", None, None),
+ ("cc:cc:cc:cc:cc:cc", "en2", None, None),
+ ("DD:DD:DD:DD:DD:DD", "en3", None, None),
]
current_info = {
- 'eth0': {'downable': True, 'mac': 'AA:AA:AA:AA:AA:AA',
- 'name': 'eth0', 'up': False},
- 'eth1': {'downable': True, 'mac': 'bb:bb:bb:bb:bb:bb',
- 'name': 'eth1', 'up': False},
- 'eth2': {'downable': True, 'mac': 'cc:cc:cc:cc:cc:cc',
- 'name': 'eth2', 'up': False},
- 'eth3': {'downable': True, 'mac': 'DD:DD:DD:DD:DD:DD',
- 'name': 'eth3', 'up': False},
+ "eth0": {
+ "downable": True,
+ "mac": "AA:AA:AA:AA:AA:AA",
+ "name": "eth0",
+ "up": False,
+ },
+ "eth1": {
+ "downable": True,
+ "mac": "bb:bb:bb:bb:bb:bb",
+ "name": "eth1",
+ "up": False,
+ },
+ "eth2": {
+ "downable": True,
+ "mac": "cc:cc:cc:cc:cc:cc",
+ "name": "eth2",
+ "up": False,
+ },
+ "eth3": {
+ "downable": True,
+ "mac": "DD:DD:DD:DD:DD:DD",
+ "name": "eth3",
+ "up": False,
+ },
}
net._rename_interfaces(renames, current_info=current_info)
expected = [
- mock.call(['ip', 'link', 'set', 'eth%d' % i, 'name', 'en%d' % i],
- capture=True)
- for i in range(len(renames))]
+ mock.call(
+ ["ip", "link", "set", "eth%d" % i, "name", "en%d" % i],
+ capture=True,
+ )
+ for i in range(len(renames))
+ ]
mock_subp.assert_has_calls(expected)
class TestNetworkState(CiTestCase):
-
def test_bcast_addr(self):
"""Test mask_and_ipv4_to_bcast_addr proper execution."""
bcast_addr = network_state.mask_and_ipv4_to_bcast_addr
- self.assertEqual("192.168.1.255",
- bcast_addr("255.255.255.0", "192.168.1.1"))
- self.assertEqual("128.42.7.255",
- bcast_addr("255.255.248.0", "128.42.5.4"))
- self.assertEqual("10.1.21.255",
- bcast_addr("255.255.255.0", "10.1.21.4"))
+ self.assertEqual(
+ "192.168.1.255", bcast_addr("255.255.255.0", "192.168.1.1")
+ )
+ self.assertEqual(
+ "128.42.7.255", bcast_addr("255.255.248.0", "128.42.5.4")
+ )
+ self.assertEqual(
+ "10.1.21.255", bcast_addr("255.255.255.0", "10.1.21.4")
+ )
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_net_activators.py b/tests/unittests/test_net_activators.py
new file mode 100644
index 00000000..3c29e2f7
--- /dev/null
+++ b/tests/unittests/test_net_activators.py
@@ -0,0 +1,262 @@
+from collections import namedtuple
+from unittest.mock import patch
+
+import pytest
+
+from cloudinit.net.activators import (
+ DEFAULT_PRIORITY,
+ IfUpDownActivator,
+ NetplanActivator,
+ NetworkdActivator,
+ NetworkManagerActivator,
+ NoActivatorException,
+ search_activator,
+ select_activator,
+)
+from cloudinit.net.network_state import parse_net_config_data
+from cloudinit.safeyaml import load
+
+V1_CONFIG = """\
+version: 1
+config:
+- type: physical
+ name: eth0
+- type: physical
+ name: eth1
+"""
+
+V2_CONFIG = """\
+version: 2
+ethernets:
+ eth0:
+ dhcp4: true
+ eth1:
+ dhcp4: true
+"""
+
+NETPLAN_CALL_LIST = [
+ ((["netplan", "apply"],), {}),
+]
+
+
+@pytest.fixture
+def available_mocks():
+ mocks = namedtuple("Mocks", "m_which, m_file")
+ with patch("cloudinit.subp.which", return_value=True) as m_which:
+ with patch("os.path.isfile", return_value=True) as m_file:
+ yield mocks(m_which, m_file)
+
+
+@pytest.fixture
+def unavailable_mocks():
+ mocks = namedtuple("Mocks", "m_which, m_file")
+ with patch("cloudinit.subp.which", return_value=False) as m_which:
+ with patch("os.path.isfile", return_value=False) as m_file:
+ yield mocks(m_which, m_file)
+
+
+class TestSearchAndSelect:
+ def test_defaults(self, available_mocks):
+ resp = search_activator()
+ assert resp == DEFAULT_PRIORITY
+
+ activator = select_activator()
+ assert activator == DEFAULT_PRIORITY[0]
+
+ def test_priority(self, available_mocks):
+ new_order = [NetplanActivator, NetworkManagerActivator]
+ resp = search_activator(priority=new_order)
+ assert resp == new_order
+
+ activator = select_activator(priority=new_order)
+ assert activator == new_order[0]
+
+ def test_target(self, available_mocks):
+ search_activator(target="/tmp")
+ assert "/tmp" == available_mocks.m_which.call_args[1]["target"]
+
+ select_activator(target="/tmp")
+ assert "/tmp" == available_mocks.m_which.call_args[1]["target"]
+
+ @patch(
+ "cloudinit.net.activators.IfUpDownActivator.available",
+ return_value=False,
+ )
+ def test_first_not_available(self, m_available, available_mocks):
+ resp = search_activator()
+ assert resp == DEFAULT_PRIORITY[1:]
+
+ resp = select_activator()
+ assert resp == DEFAULT_PRIORITY[1]
+
+ def test_priority_not_exist(self, available_mocks):
+ with pytest.raises(ValueError):
+ search_activator(priority=["spam", "eggs"])
+ with pytest.raises(ValueError):
+ select_activator(priority=["spam", "eggs"])
+
+ def test_none_available(self, unavailable_mocks):
+ resp = search_activator()
+ assert resp == []
+
+ with pytest.raises(NoActivatorException):
+ select_activator()
+
+
+IF_UP_DOWN_AVAILABLE_CALLS = [
+ (("ifquery",), {"search": ["/sbin", "/usr/sbin"], "target": None}),
+ (("ifup",), {"search": ["/sbin", "/usr/sbin"], "target": None}),
+ (("ifdown",), {"search": ["/sbin", "/usr/sbin"], "target": None}),
+]
+
+NETPLAN_AVAILABLE_CALLS = [
+ (("netplan",), {"search": ["/usr/sbin", "/sbin"], "target": None}),
+]
+
+NETWORK_MANAGER_AVAILABLE_CALLS = [
+ (("nmcli",), {"target": None}),
+]
+
+NETWORKD_AVAILABLE_CALLS = [
+ (("ip",), {"search": ["/usr/sbin", "/bin"], "target": None}),
+ (("systemctl",), {"search": ["/usr/sbin", "/bin"], "target": None}),
+]
+
+
+@pytest.mark.parametrize(
+ "activator, available_calls",
+ [
+ (IfUpDownActivator, IF_UP_DOWN_AVAILABLE_CALLS),
+ (NetplanActivator, NETPLAN_AVAILABLE_CALLS),
+ (NetworkManagerActivator, NETWORK_MANAGER_AVAILABLE_CALLS),
+ (NetworkdActivator, NETWORKD_AVAILABLE_CALLS),
+ ],
+)
+class TestActivatorsAvailable:
+ def test_available(self, activator, available_calls, available_mocks):
+ activator.available()
+ assert available_mocks.m_which.call_args_list == available_calls
+
+
+IF_UP_DOWN_BRING_UP_CALL_LIST = [
+ ((["ifup", "eth0"],), {}),
+ ((["ifup", "eth1"],), {}),
+]
+
+NETWORK_MANAGER_BRING_UP_CALL_LIST = [
+ ((["nmcli", "connection", "up", "ifname", "eth0"],), {}),
+ ((["nmcli", "connection", "up", "ifname", "eth1"],), {}),
+]
+
+NETWORKD_BRING_UP_CALL_LIST = [
+ ((["ip", "link", "set", "up", "eth0"],), {}),
+ ((["ip", "link", "set", "up", "eth1"],), {}),
+ ((["systemctl", "restart", "systemd-networkd", "systemd-resolved"],), {}),
+]
+
+
+@pytest.mark.parametrize(
+ "activator, expected_call_list",
+ [
+ (IfUpDownActivator, IF_UP_DOWN_BRING_UP_CALL_LIST),
+ (NetplanActivator, NETPLAN_CALL_LIST),
+ (NetworkManagerActivator, NETWORK_MANAGER_BRING_UP_CALL_LIST),
+ (NetworkdActivator, NETWORKD_BRING_UP_CALL_LIST),
+ ],
+)
+class TestActivatorsBringUp:
+ @patch("cloudinit.subp.subp", return_value=("", ""))
+ def test_bring_up_interface(
+ self, m_subp, activator, expected_call_list, available_mocks
+ ):
+ activator.bring_up_interface("eth0")
+ assert len(m_subp.call_args_list) == 1
+ assert m_subp.call_args_list[0] == expected_call_list[0]
+
+ @patch("cloudinit.subp.subp", return_value=("", ""))
+ def test_bring_up_interfaces(
+ self, m_subp, activator, expected_call_list, available_mocks
+ ):
+ index = 0
+ activator.bring_up_interfaces(["eth0", "eth1"])
+ for call in m_subp.call_args_list:
+ assert call == expected_call_list[index]
+ index += 1
+
+ @patch("cloudinit.subp.subp", return_value=("", ""))
+ def test_bring_up_all_interfaces_v1(
+ self, m_subp, activator, expected_call_list, available_mocks
+ ):
+ network_state = parse_net_config_data(load(V1_CONFIG))
+ activator.bring_up_all_interfaces(network_state)
+ for call in m_subp.call_args_list:
+ assert call in expected_call_list
+
+ @patch("cloudinit.subp.subp", return_value=("", ""))
+ def test_bring_up_all_interfaces_v2(
+ self, m_subp, activator, expected_call_list, available_mocks
+ ):
+ network_state = parse_net_config_data(load(V2_CONFIG))
+ activator.bring_up_all_interfaces(network_state)
+ for call in m_subp.call_args_list:
+ assert call in expected_call_list
+
+
+IF_UP_DOWN_BRING_DOWN_CALL_LIST = [
+ ((["ifdown", "eth0"],), {}),
+ ((["ifdown", "eth1"],), {}),
+]
+
+NETWORK_MANAGER_BRING_DOWN_CALL_LIST = [
+ ((["nmcli", "connection", "down", "eth0"],), {}),
+ ((["nmcli", "connection", "down", "eth1"],), {}),
+]
+
+NETWORKD_BRING_DOWN_CALL_LIST = [
+ ((["ip", "link", "set", "down", "eth0"],), {}),
+ ((["ip", "link", "set", "down", "eth1"],), {}),
+]
+
+
+@pytest.mark.parametrize(
+ "activator, expected_call_list",
+ [
+ (IfUpDownActivator, IF_UP_DOWN_BRING_DOWN_CALL_LIST),
+ (NetplanActivator, NETPLAN_CALL_LIST),
+ (NetworkManagerActivator, NETWORK_MANAGER_BRING_DOWN_CALL_LIST),
+ (NetworkdActivator, NETWORKD_BRING_DOWN_CALL_LIST),
+ ],
+)
+class TestActivatorsBringDown:
+ @patch("cloudinit.subp.subp", return_value=("", ""))
+ def test_bring_down_interface(
+ self, m_subp, activator, expected_call_list, available_mocks
+ ):
+ activator.bring_down_interface("eth0")
+ assert len(m_subp.call_args_list) == 1
+ assert m_subp.call_args_list[0] == expected_call_list[0]
+
+ @patch("cloudinit.subp.subp", return_value=("", ""))
+ def test_bring_down_interfaces(
+ self, m_subp, activator, expected_call_list, available_mocks
+ ):
+ activator.bring_down_interfaces(["eth0", "eth1"])
+ assert expected_call_list == m_subp.call_args_list
+
+ @patch("cloudinit.subp.subp", return_value=("", ""))
+ def test_bring_down_all_interfaces_v1(
+ self, m_subp, activator, expected_call_list, available_mocks
+ ):
+ network_state = parse_net_config_data(load(V1_CONFIG))
+ activator.bring_down_all_interfaces(network_state)
+ for call in m_subp.call_args_list:
+ assert call in expected_call_list
+
+ @patch("cloudinit.subp.subp", return_value=("", ""))
+ def test_bring_down_all_interfaces_v2(
+ self, m_subp, activator, expected_call_list, available_mocks
+ ):
+ network_state = parse_net_config_data(load(V2_CONFIG))
+ activator.bring_down_all_interfaces(network_state)
+ for call in m_subp.call_args_list:
+ assert call in expected_call_list
diff --git a/tests/unittests/test_net_freebsd.py b/tests/unittests/test_net_freebsd.py
index 414b4830..3facb2bb 100644
--- a/tests/unittests/test_net_freebsd.py
+++ b/tests/unittests/test_net_freebsd.py
@@ -1,19 +1,79 @@
-from cloudinit import net
+import os
-from cloudinit.tests.helpers import (CiTestCase, mock, readResource)
+import cloudinit.net
+import cloudinit.net.network_state
+from cloudinit import safeyaml
+from tests.unittests.helpers import CiTestCase, dir2dict, mock, readResource
SAMPLE_FREEBSD_IFCONFIG_OUT = readResource("netinfo/freebsd-ifconfig-output")
+V1 = """
+config:
+- id: eno1
+ mac_address: 08:94:ef:51:ae:e0
+ mtu: 1470
+ name: eno1
+ subnets:
+ - address: 172.20.80.129/25
+ type: static
+ type: physical
+version: 1
+"""
class TestInterfacesByMac(CiTestCase):
-
- @mock.patch('cloudinit.subp.subp')
- @mock.patch('cloudinit.util.is_FreeBSD')
+ @mock.patch("cloudinit.subp.subp")
+ @mock.patch("cloudinit.util.is_FreeBSD")
def test_get_interfaces_by_mac(self, mock_is_FreeBSD, mock_subp):
mock_is_FreeBSD.return_value = True
mock_subp.return_value = (SAMPLE_FREEBSD_IFCONFIG_OUT, 0)
- a = net.get_interfaces_by_mac()
- assert a == {'52:54:00:50:b7:0d': 'vtnet0',
- '80:00:73:63:5c:48': 're0.33',
- '02:14:39:0e:25:00': 'bridge0',
- '02:ff:60:8c:f3:72': 'vnet0:11'}
+ a = cloudinit.net.get_interfaces_by_mac()
+ assert a == {
+ "52:54:00:50:b7:0d": "vtnet0",
+ "80:00:73:63:5c:48": "re0.33",
+ "02:14:39:0e:25:00": "bridge0",
+ "02:ff:60:8c:f3:72": "vnet0:11",
+ }
+
+
+class TestFreeBSDRoundTrip(CiTestCase):
+ def _render_and_read(
+ self, network_config=None, state=None, netplan_path=None, target=None
+ ):
+ if target is None:
+ target = self.tmp_dir()
+ os.mkdir("%s/etc" % target)
+ with open("%s/etc/rc.conf" % target, "a") as fd:
+ fd.write("# dummy rc.conf\n")
+ with open("%s/etc/resolv.conf" % target, "a") as fd:
+ fd.write("# dummy resolv.conf\n")
+
+ if network_config:
+ ns = cloudinit.net.network_state.parse_net_config_data(
+ network_config
+ )
+ elif state:
+ ns = state
+ else:
+ raise ValueError("Expected data or state, got neither")
+
+ renderer = cloudinit.net.freebsd.Renderer()
+ renderer.render_network_state(ns, target=target)
+ return dir2dict(target)
+
+ @mock.patch("cloudinit.subp.subp")
+ def test_render_output_has_yaml(self, mock_subp):
+
+ entry = {
+ "yaml": V1,
+ }
+ network_config = safeyaml.load(entry["yaml"])
+ ns = cloudinit.net.network_state.parse_net_config_data(network_config)
+ files = self._render_and_read(state=ns)
+ assert files == {
+ "/etc/resolv.conf": "# dummy resolv.conf\n",
+ "/etc/rc.conf": (
+ "# dummy rc.conf\n"
+ "ifconfig_eno1="
+ "'172.20.80.129 netmask 255.255.255.128 mtu 1470'\n"
+ ),
+ }
diff --git a/tests/unittests/test_netinfo.py b/tests/unittests/test_netinfo.py
new file mode 100644
index 00000000..aecce921
--- /dev/null
+++ b/tests/unittests/test_netinfo.py
@@ -0,0 +1,353 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Tests netinfo module functions and classes."""
+
+import json
+from copy import copy
+
+import pytest
+
+from cloudinit import subp
+from cloudinit.netinfo import (
+ _netdev_info_iproute_json,
+ netdev_info,
+ netdev_pformat,
+ route_pformat,
+)
+from tests.unittests.helpers import mock, readResource
+
+# Example ifconfig and route output
+SAMPLE_OLD_IFCONFIG_OUT = readResource("netinfo/old-ifconfig-output")
+SAMPLE_NEW_IFCONFIG_OUT = readResource("netinfo/new-ifconfig-output")
+SAMPLE_FREEBSD_IFCONFIG_OUT = readResource("netinfo/freebsd-ifconfig-output")
+SAMPLE_IPADDRSHOW_OUT = readResource("netinfo/sample-ipaddrshow-output")
+SAMPLE_IPADDRSHOW_JSON = readResource("netinfo/sample-ipaddrshow-json")
+SAMPLE_ROUTE_OUT_V4 = readResource("netinfo/sample-route-output-v4")
+SAMPLE_ROUTE_OUT_V6 = readResource("netinfo/sample-route-output-v6")
+SAMPLE_IPROUTE_OUT_V4 = readResource("netinfo/sample-iproute-output-v4")
+SAMPLE_IPROUTE_OUT_V6 = readResource("netinfo/sample-iproute-output-v6")
+NETDEV_FORMATTED_OUT = readResource("netinfo/netdev-formatted-output")
+ROUTE_FORMATTED_OUT = readResource("netinfo/route-formatted-output")
+FREEBSD_NETDEV_OUT = readResource("netinfo/freebsd-netdev-formatted-output")
+
+
+class TestNetInfo:
+ @mock.patch("cloudinit.netinfo.subp.which")
+ @mock.patch("cloudinit.netinfo.subp.subp")
+ def test_netdev_old_nettools_pformat(self, m_subp, m_which):
+ """netdev_pformat properly rendering old nettools info."""
+ m_subp.return_value = (SAMPLE_OLD_IFCONFIG_OUT, "")
+ m_which.side_effect = lambda x: x if x == "ifconfig" else None
+ content = netdev_pformat()
+ assert NETDEV_FORMATTED_OUT == content
+
+ @mock.patch("cloudinit.netinfo.subp.which")
+ @mock.patch("cloudinit.netinfo.subp.subp")
+ def test_netdev_new_nettools_pformat(self, m_subp, m_which):
+ """netdev_pformat properly rendering netdev new nettools info."""
+ m_subp.return_value = (SAMPLE_NEW_IFCONFIG_OUT, "")
+ m_which.side_effect = lambda x: x if x == "ifconfig" else None
+ content = netdev_pformat()
+ assert NETDEV_FORMATTED_OUT == content
+
+ @mock.patch("cloudinit.netinfo.subp.which")
+ @mock.patch("cloudinit.netinfo.subp.subp")
+ def test_netdev_freebsd_nettools_pformat(self, m_subp, m_which):
+ """netdev_pformat properly rendering netdev freebsd nettools info."""
+ m_subp.return_value = (SAMPLE_FREEBSD_IFCONFIG_OUT, "")
+ m_which.side_effect = lambda x: x if x == "ifconfig" else None
+ content = netdev_pformat()
+ print()
+ print(content)
+ print()
+ assert FREEBSD_NETDEV_OUT == content
+
+ @pytest.mark.parametrize(
+ "resource,is_json",
+ [(SAMPLE_IPADDRSHOW_OUT, False), (SAMPLE_IPADDRSHOW_JSON, True)],
+ )
+ @mock.patch("cloudinit.netinfo.subp.which")
+ @mock.patch("cloudinit.netinfo.subp.subp")
+ def test_netdev_iproute_pformat(self, m_subp, m_which, resource, is_json):
+ """netdev_pformat properly rendering ip address show info (plain and json)."""
+ m_subp.return_value = (resource, "")
+ if not is_json:
+ m_subp.side_effect = [subp.ProcessExecutionError, (resource, "")]
+ m_which.side_effect = lambda x: x if x == "ip" else None
+ content = netdev_pformat()
+ new_output = copy(NETDEV_FORMATTED_OUT)
+ # ip address show describes global scopes on ipv4 addresses
+ # whereas ifconfig does not. Add proper global/host scope to output.
+ new_output = new_output.replace("| . | 50:7b", "| global | 50:7b")
+ new_output = new_output.replace(
+ "255.0.0.0 | . |", "255.0.0.0 | host |"
+ )
+ assert new_output == content
+
+ @mock.patch("cloudinit.netinfo.subp.which")
+ @mock.patch("cloudinit.netinfo.subp.subp")
+ def test_netdev_warn_on_missing_commands(self, m_subp, m_which, caplog):
+ """netdev_pformat warns when missing both ip and 'netstat'."""
+ m_which.return_value = None # Niether ip nor netstat found
+ content = netdev_pformat()
+ assert "\n" == content
+ log = caplog.records[0]
+ assert log.levelname == "WARNING"
+ assert log.msg == (
+ "Could not print networks: missing 'ip' and 'ifconfig' commands"
+ )
+ m_subp.assert_not_called()
+
+ @mock.patch("cloudinit.netinfo.subp.which")
+ @mock.patch("cloudinit.netinfo.subp.subp")
+ def test_netdev_info_nettools_down(self, m_subp, m_which):
+ """test netdev_info using nettools and down interfaces."""
+ m_subp.return_value = (
+ readResource("netinfo/new-ifconfig-output-down"),
+ "",
+ )
+ m_which.side_effect = lambda x: x if x == "ifconfig" else None
+ assert netdev_info(".") == {
+ "eth0": {
+ "ipv4": [],
+ "ipv6": [],
+ "hwaddr": "00:16:3e:de:51:a6",
+ "up": False,
+ },
+ "lo": {
+ "ipv4": [{"ip": "127.0.0.1", "mask": "255.0.0.0"}],
+ "ipv6": [{"ip": "::1/128", "scope6": "host"}],
+ "hwaddr": ".",
+ "up": True,
+ },
+ }
+
+ @pytest.mark.parametrize(
+ "resource,is_json",
+ [
+ ("netinfo/sample-ipaddrshow-output-down", False),
+ ("netinfo/sample-ipaddrshow-json-down", True),
+ ],
+ )
+ @mock.patch("cloudinit.netinfo.subp.which")
+ @mock.patch("cloudinit.netinfo.subp.subp")
+ def test_netdev_info_iproute_down(
+ self, m_subp, m_which, resource, is_json
+ ):
+ """Test netdev_info with ip and down interfaces."""
+ m_subp.return_value = (readResource(resource), "")
+ if not is_json:
+ m_subp.side_effect = [
+ subp.ProcessExecutionError,
+ (readResource(resource), ""),
+ ]
+ m_which.side_effect = lambda x: x if x == "ip" else None
+ assert netdev_info(".") == {
+ "lo": {
+ "ipv4": [
+ {
+ "ip": "127.0.0.1",
+ "bcast": ".",
+ "mask": "255.0.0.0",
+ "scope": "host",
+ }
+ ],
+ "ipv6": [{"ip": "::1/128", "scope6": "host"}],
+ "hwaddr": ".",
+ "up": True,
+ },
+ "eth0": {
+ "ipv4": [],
+ "ipv6": [],
+ "hwaddr": "00:16:3e:de:51:a6",
+ "up": False,
+ },
+ }
+
+ @mock.patch("cloudinit.netinfo.netdev_info")
+ def test_netdev_pformat_with_down(self, m_netdev_info):
+ """test netdev_pformat when netdev_info returns 'down' interfaces."""
+ m_netdev_info.return_value = {
+ "lo": {
+ "ipv4": [
+ {"ip": "127.0.0.1", "mask": "255.0.0.0", "scope": "host"}
+ ],
+ "ipv6": [{"ip": "::1/128", "scope6": "host"}],
+ "hwaddr": ".",
+ "up": True,
+ },
+ "eth0": {
+ "ipv4": [],
+ "ipv6": [],
+ "hwaddr": "00:16:3e:de:51:a6",
+ "up": False,
+ },
+ }
+ assert (
+ readResource("netinfo/netdev-formatted-output-down")
+ == netdev_pformat()
+ )
+
+ @mock.patch("cloudinit.netinfo.subp.which")
+ @mock.patch("cloudinit.netinfo.subp.subp")
+ def test_route_nettools_pformat(self, m_subp, m_which):
+ """route_pformat properly rendering nettools route info."""
+
+ def subp_netstat_route_selector(*args, **kwargs):
+ if args[0] == ["netstat", "--route", "--numeric", "--extend"]:
+ return (SAMPLE_ROUTE_OUT_V4, "")
+ if args[0] == ["netstat", "-A", "inet6", "--route", "--numeric"]:
+ return (SAMPLE_ROUTE_OUT_V6, "")
+ raise Exception("Unexpected subp call %s" % args[0])
+
+ m_subp.side_effect = subp_netstat_route_selector
+ m_which.side_effect = lambda x: x if x == "netstat" else None
+ content = route_pformat()
+ assert ROUTE_FORMATTED_OUT == content
+
+ @mock.patch("cloudinit.netinfo.subp.which")
+ @mock.patch("cloudinit.netinfo.subp.subp")
+ def test_route_iproute_pformat(self, m_subp, m_which):
+ """route_pformat properly rendering ip route info."""
+
+ def subp_iproute_selector(*args, **kwargs):
+ if ["ip", "-o", "route", "list"] == args[0]:
+ return (SAMPLE_IPROUTE_OUT_V4, "")
+ v6cmd = ["ip", "--oneline", "-6", "route", "list", "table", "all"]
+ if v6cmd == args[0]:
+ return (SAMPLE_IPROUTE_OUT_V6, "")
+ raise Exception("Unexpected subp call %s" % args[0])
+
+ m_subp.side_effect = subp_iproute_selector
+ m_which.side_effect = lambda x: x if x == "ip" else None
+ content = route_pformat()
+ assert ROUTE_FORMATTED_OUT == content
+
+ @mock.patch("cloudinit.netinfo.subp.which")
+ @mock.patch("cloudinit.netinfo.subp.subp")
+ def test_route_warn_on_missing_commands(self, m_subp, m_which, caplog):
+ """route_pformat warns when missing both ip and 'netstat'."""
+ m_which.return_value = None # Neither ip nor netstat found
+ content = route_pformat()
+ assert "\n" == content
+ log = caplog.records[0]
+ assert log.levelname == "WARNING"
+ assert log.msg == (
+ "Could not print routes: missing 'ip' and 'netstat' commands"
+ )
+ m_subp.assert_not_called()
+
+ @pytest.mark.parametrize(
+ "input,expected",
+ [
+ # Test hwaddr set when link_type is ether,
+ # Test up True when flags contains UP and LOWER_UP
+ (
+ [
+ {
+ "ifname": "eth0",
+ "link_type": "ether",
+ "address": "00:00:00:00:00:00",
+ "flags": ["LOOPBACK", "UP", "LOWER_UP"],
+ }
+ ],
+ {
+ "eth0": {
+ "hwaddr": "00:00:00:00:00:00",
+ "ipv4": [],
+ "ipv6": [],
+ "up": True,
+ }
+ },
+ ),
+ # Test hwaddr not set when link_type is not ether
+ # Test up False when flags does not contain both UP and LOWER_UP
+ (
+ [
+ {
+ "ifname": "eth0",
+ "link_type": "none",
+ "address": "00:00:00:00:00:00",
+ "flags": ["LOOPBACK", "UP"],
+ }
+ ],
+ {
+ "eth0": {
+ "hwaddr": "",
+ "ipv4": [],
+ "ipv6": [],
+ "up": False,
+ }
+ },
+ ),
+ (
+ [
+ {
+ "ifname": "eth0",
+ "addr_info": [
+ # Test for ipv4:
+ # ip set correctly
+ # mask set correctly
+ # bcast set correctly
+ # scope set correctly
+ {
+ "family": "inet",
+ "local": "10.0.0.1",
+ "broadcast": "10.0.0.255",
+ "prefixlen": 24,
+ "scope": "global",
+ },
+ # Test for ipv6:
+ # ip set correctly
+ # mask set correctly when no 'address' present
+ # scope6 set correctly
+ {
+ "family": "inet6",
+ "local": "fd12:3456:7890:1234::5678:9012",
+ "prefixlen": 64,
+ "scope": "global",
+ },
+ # Test for ipv6:
+ # mask not set when 'address' present
+ {
+ "family": "inet6",
+ "local": "fd12:3456:7890:1234::5678:9012",
+ "address": "fd12:3456:7890:1234::1",
+ "prefixlen": 64,
+ },
+ ],
+ }
+ ],
+ {
+ "eth0": {
+ "hwaddr": "",
+ "ipv4": [
+ {
+ "ip": "10.0.0.1",
+ "mask": "255.255.255.0",
+ "bcast": "10.0.0.255",
+ "scope": "global",
+ }
+ ],
+ "ipv6": [
+ {
+ "ip": "fd12:3456:7890:1234::5678:9012/64",
+ "scope6": "global",
+ },
+ {
+ "ip": "fd12:3456:7890:1234::5678:9012",
+ "scope6": "",
+ },
+ ],
+ "up": False,
+ }
+ },
+ ),
+ ],
+ )
+ def test_netdev_info_iproute_json(self, input, expected):
+ out = _netdev_info_iproute_json(json.dumps(input))
+ assert out == expected
+
+
+# vi: ts=4 expandtab
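
The mocks above all follow one pattern: `subp.which` decides whether `ip` or `ifconfig` is available, and each test returns None for the tool it wants netinfo to skip. As a minimal sketch of the selection pattern these mocks simulate (not the cloud-init implementation itself; `pick_net_tool` is a name invented for this illustration):

    from cloudinit import subp

    def pick_net_tool():
        # Prefer iproute2's `ip`, fall back to net-tools' `ifconfig`, and
        # return None when neither is installed (the case the
        # warn_on_missing_commands tests above cover).
        for tool in ("ip", "ifconfig"):
            if subp.which(tool):
                return tool
        return None
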
diff --git a/tests/unittests/test_pathprefix2dict.py b/tests/unittests/test_pathprefix2dict.py
index abbb29b8..83141263 100644
--- a/tests/unittests/test_pathprefix2dict.py
+++ b/tests/unittests/test_pathprefix2dict.py
@@ -1,46 +1,46 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import util
-
-from cloudinit.tests.helpers import TestCase, populate_dir
-
import shutil
import tempfile
+from cloudinit import util
+from tests.unittests.helpers import TestCase, populate_dir
-class TestPathPrefix2Dict(TestCase):
+class TestPathPrefix2Dict(TestCase):
def setUp(self):
super(TestPathPrefix2Dict, self).setUp()
self.tmp = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.tmp)
def test_required_only(self):
- dirdata = {'f1': b'f1content', 'f2': b'f2content'}
+ dirdata = {"f1": b"f1content", "f2": b"f2content"}
populate_dir(self.tmp, dirdata)
- ret = util.pathprefix2dict(self.tmp, required=['f1', 'f2'])
+ ret = util.pathprefix2dict(self.tmp, required=["f1", "f2"])
self.assertEqual(dirdata, ret)
def test_required_missing(self):
- dirdata = {'f1': b'f1content'}
+ dirdata = {"f1": b"f1content"}
populate_dir(self.tmp, dirdata)
- kwargs = {'required': ['f1', 'f2']}
+ kwargs = {"required": ["f1", "f2"]}
self.assertRaises(ValueError, util.pathprefix2dict, self.tmp, **kwargs)
def test_no_required_and_optional(self):
- dirdata = {'f1': b'f1c', 'f2': b'f2c'}
+ dirdata = {"f1": b"f1c", "f2": b"f2c"}
populate_dir(self.tmp, dirdata)
- ret = util.pathprefix2dict(self.tmp, required=None,
- optional=['f1', 'f2'])
+ ret = util.pathprefix2dict(
+ self.tmp, required=None, optional=["f1", "f2"]
+ )
self.assertEqual(dirdata, ret)
def test_required_and_optional(self):
- dirdata = {'f1': b'f1c', 'f2': b'f2c'}
+ dirdata = {"f1": b"f1c", "f2": b"f2c"}
populate_dir(self.tmp, dirdata)
- ret = util.pathprefix2dict(self.tmp, required=['f1'], optional=['f2'])
+ ret = util.pathprefix2dict(self.tmp, required=["f1"], optional=["f2"])
self.assertEqual(dirdata, ret)
+
# vi: ts=4 expandtab
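
For reference, a minimal usage sketch of `util.pathprefix2dict` matching the behaviour the tests above pin down: files under a directory are read into a `{name: bytes}` mapping, and a missing required file raises ValueError. The seed path and file names here are illustrative only.

    from cloudinit import util

    try:
        seed = util.pathprefix2dict(
            "/var/lib/cloud/seed/nocloud",  # hypothetical example path
            required=["meta-data"],
            optional=["user-data", "vendor-data"],
        )
    except ValueError:
        seed = {}  # a required file was missing
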
diff --git a/tests/unittests/test_persistence.py b/tests/unittests/test_persistence.py
new file mode 100644
index 00000000..ec1152a9
--- /dev/null
+++ b/tests/unittests/test_persistence.py
@@ -0,0 +1,127 @@
+# Copyright (C) 2020 Canonical Ltd.
+#
+# Author: Daniel Watkins <oddbloke@ubuntu.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+"""
+Tests for cloudinit.persistence.
+
+Per https://docs.python.org/3/library/pickle.html, only "classes that are
+defined at the top level of a module" can be pickled. This means that all of
+our ``CloudInitPickleMixin`` subclasses for testing must be defined at
+module-level (rather than being defined inline or dynamically in the body of
+test methods, as we would do without this constraint).
+
+``TestPickleMixin.test_subclasses`` iterates over a list of all of these
+classes, and tests that they round-trip through a pickle dump/load. As the
+interface we're testing is that ``_unpickle`` is called appropriately on
+subclasses, our subclasses define their assertions in their ``_unpickle``
+implementation. (This means that the assertions will not be executed if
+``_unpickle`` is not called at all; we have
+``TestPickleMixin.test_unpickle_called`` to ensure it is called.)
+
+To avoid manually maintaining a list of classes for parametrization we use a
+simple metaclass, ``_Collector``, to gather them up.
+"""
+
+import pickle
+from unittest import mock
+
+import pytest
+
+from cloudinit.persistence import CloudInitPickleMixin
+
+
+class _Collector(type):
+ """Any class using this as a metaclass will be stored in test_classes."""
+
+ test_classes = []
+
+ def __new__(cls, *args):
+ new_cls = super().__new__(cls, *args)
+ _Collector.test_classes.append(new_cls)
+ return new_cls
+
+
+class InstanceVersionNotUsed(CloudInitPickleMixin, metaclass=_Collector):
+ """Test that the class version is used over one set in instance state."""
+
+ _ci_pkl_version = 1
+
+ def __init__(self):
+ self._ci_pkl_version = 2
+
+ def _unpickle(self, ci_pkl_version: int) -> None:
+ assert 1 == ci_pkl_version
+
+
+class MissingVersionHandled(CloudInitPickleMixin, metaclass=_Collector):
+ """Test that pickles without ``_ci_pkl_version`` are handled gracefully.
+
+ This is tested by overriding ``__getstate__`` so the dumped pickle of this
+ class will not have ``_ci_pkl_version`` included.
+ """
+
+ def __getstate__(self):
+ return self.__dict__
+
+ def _unpickle(self, ci_pkl_version: int) -> None:
+ assert 0 == ci_pkl_version
+
+
+class OverridenVersionHonored(CloudInitPickleMixin, metaclass=_Collector):
+ """Test that the subclass's version is used."""
+
+ _ci_pkl_version = 1
+
+ def _unpickle(self, ci_pkl_version: int) -> None:
+ assert 1 == ci_pkl_version
+
+
+class StateIsRestored(CloudInitPickleMixin, metaclass=_Collector):
+ """Instance state should be restored before ``_unpickle`` is called."""
+
+ def __init__(self):
+ self.some_state = "some state"
+
+ def _unpickle(self, ci_pkl_version: int) -> None:
+ assert "some state" == self.some_state
+
+
+class UnpickleCanBeUnoverriden(CloudInitPickleMixin, metaclass=_Collector):
+ """Subclasses should not need to override ``_unpickle``."""
+
+
+class VersionDefaultsToZero(CloudInitPickleMixin, metaclass=_Collector):
+ """Test that the default version is 0."""
+
+ def _unpickle(self, ci_pkl_version: int) -> None:
+ assert 0 == ci_pkl_version
+
+
+class VersionIsPoppedFromState(CloudInitPickleMixin, metaclass=_Collector):
+ """Test _ci_pkl_version is popped from state before being restored."""
+
+ def _unpickle(self, ci_pkl_version: int) -> None:
+ # `self._ci_pkl_version` returns the type's _ci_pkl_version if it isn't
+ # in instance state, so we need to explicitly check self.__dict__.
+ assert "_ci_pkl_version" not in self.__dict__
+
+
+class TestPickleMixin:
+ def test_unpickle_called(self):
+ """Test that self._unpickle is called on unpickle."""
+ with mock.patch.object(
+ CloudInitPickleMixin, "_unpickle"
+ ) as m_unpickle:
+ pickle.loads(pickle.dumps(CloudInitPickleMixin()))
+ assert 1 == m_unpickle.call_count
+
+ @pytest.mark.parametrize("cls", _Collector.test_classes)
+ def test_subclasses(self, cls):
+ """For each collected class, round-trip through pickle dump/load.
+
+ Assertions are implemented in ``cls._unpickle``, and so are invoked as
+ part of the pickle load.
+ """
+ pickle.loads(pickle.dumps(cls()))
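
The module docstring above describes the ``_unpickle``/``_ci_pkl_version`` contract, and the collected classes only assert on it. As a complement, here is a hypothetical subclass (not from cloud-init; the attribute names are invented for this sketch) showing how the hook is intended to be used to migrate state pickled by an older version:

    from cloudinit.persistence import CloudInitPickleMixin

    class HypotheticalCache(CloudInitPickleMixin):
        _ci_pkl_version = 2

        def __init__(self):
            self.entries = {}

        def _unpickle(self, ci_pkl_version: int) -> None:
            super()._unpickle(ci_pkl_version)
            if ci_pkl_version < 2 and hasattr(self, "old_entries"):
                # Version 1 pickles stored the data under `old_entries`.
                self.entries = self.old_entries
                del self.old_entries
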
diff --git a/tests/unittests/test_registry.py b/tests/unittests/test_registry.py
index 2b625026..28ee04ec 100644
--- a/tests/unittests/test_registry.py
+++ b/tests/unittests/test_registry.py
@@ -1,32 +1,33 @@
# This file is part of cloud-init. See LICENSE file for license information.
from cloudinit.registry import DictRegistry
-
-from cloudinit.tests.helpers import (mock, TestCase)
+from tests.unittests.helpers import TestCase, mock
class TestDictRegistry(TestCase):
-
def test_added_item_included_in_output(self):
registry = DictRegistry()
- item_key, item_to_register = 'test_key', mock.Mock()
+ item_key, item_to_register = "test_key", mock.Mock()
registry.register_item(item_key, item_to_register)
- self.assertEqual({item_key: item_to_register},
- registry.registered_items)
+ self.assertEqual(
+ {item_key: item_to_register}, registry.registered_items
+ )
def test_registry_starts_out_empty(self):
self.assertEqual({}, DictRegistry().registered_items)
def test_modifying_registered_items_isnt_exposed_to_other_callers(self):
registry = DictRegistry()
- registry.registered_items['test_item'] = mock.Mock()
+ registry.registered_items["test_item"] = mock.Mock()
self.assertEqual({}, registry.registered_items)
def test_keys_cannot_be_replaced(self):
registry = DictRegistry()
- item_key = 'test_key'
+ item_key = "test_key"
registry.register_item(item_key, mock.Mock())
- self.assertRaises(ValueError,
- registry.register_item, item_key, mock.Mock())
+ self.assertRaises(
+ ValueError, registry.register_item, item_key, mock.Mock()
+ )
+
# vi: ts=4 expandtab
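
A short usage sketch of the DictRegistry behaviour these tests pin down: `registered_items` hands back a copy, and a key cannot be registered twice.

    from cloudinit.registry import DictRegistry

    registry = DictRegistry()
    registry.register_item("handler_a", object())
    snapshot = registry.registered_items
    snapshot["handler_b"] = object()   # mutating the copy does not leak back
    try:
        registry.register_item("handler_a", object())
    except ValueError:
        pass                           # keys cannot be replaced
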
diff --git a/tests/unittests/test_render_cloudcfg.py b/tests/unittests/test_render_cloudcfg.py
index 495e2669..30fbd1a4 100644
--- a/tests/unittests/test_render_cloudcfg.py
+++ b/tests/unittests/test_render_cloudcfg.py
@@ -1,59 +1,96 @@
"""Tests for tools/render-cloudcfg"""
-import os
import sys
import pytest
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, templater, util
+from tests.unittests.helpers import cloud_init_project_dir
# TODO(Look to align with tools.render-cloudcfg or cloudinit.distros.OSFAMILIES)
-DISTRO_VARIANTS = ["amazon", "arch", "centos", "debian", "fedora", "freebsd",
- "netbsd", "openbsd", "rhel", "suse", "ubuntu", "unknown"]
+DISTRO_VARIANTS = [
+ "amazon",
+ "arch",
+ "centos",
+ "debian",
+ "eurolinux",
+ "fedora",
+ "freebsd",
+ "gentoo",
+ "netbsd",
+ "openbsd",
+ "photon",
+ "rhel",
+ "suse",
+ "ubuntu",
+ "unknown",
+]
@pytest.mark.allow_subp_for(sys.executable)
class TestRenderCloudCfg:
- cmd = [sys.executable, os.path.realpath('tools/render-cloudcfg')]
- tmpl_path = os.path.realpath('config/cloud.cfg.tmpl')
+ cmd = [sys.executable, cloud_init_project_dir("tools/render-cloudcfg")]
+ tmpl_path = cloud_init_project_dir("config/cloud.cfg.tmpl")
- @pytest.mark.parametrize('variant', (DISTRO_VARIANTS))
+ def test_variant_sets_distro_in_cloud_cfg_subp(self, tmpdir):
+ outfile = tmpdir.join("outcfg").strpath
+
+ subp.subp(self.cmd + ["--variant", "ubuntu", self.tmpl_path, outfile])
+ with open(outfile) as stream:
+ system_cfg = util.load_yaml(stream.read())
+ assert system_cfg["system_info"]["distro"] == "ubuntu"
+
+ @pytest.mark.parametrize("variant", (DISTRO_VARIANTS))
def test_variant_sets_distro_in_cloud_cfg(self, variant, tmpdir):
- outfile = tmpdir.join('outcfg').strpath
- subp.subp(
- self.cmd + ['--variant', variant, self.tmpl_path, outfile])
+ """Testing parametrized inputs with imported function saves ~0.5s per
+ call versus calling as subp
+ """
+ outfile = tmpdir.join("outcfg").strpath
+
+ templater.render_cloudcfg(variant, self.tmpl_path, outfile)
with open(outfile) as stream:
system_cfg = util.load_yaml(stream.read())
- if variant == 'unknown':
- variant = 'ubuntu' # Unknown is defaulted to ubuntu
- assert system_cfg['system_info']['distro'] == variant
+ if variant == "unknown":
+ variant = "ubuntu" # Unknown is defaulted to ubuntu
+ assert system_cfg["system_info"]["distro"] == variant
- @pytest.mark.parametrize('variant', (DISTRO_VARIANTS))
+ @pytest.mark.parametrize("variant", (DISTRO_VARIANTS))
def test_variant_sets_default_user_in_cloud_cfg(self, variant, tmpdir):
- outfile = tmpdir.join('outcfg').strpath
- subp.subp(
- self.cmd + ['--variant', variant, self.tmpl_path, outfile])
+ """Testing parametrized inputs with imported function saves ~0.5s per
+ call versus calling as subp
+ """
+ outfile = tmpdir.join("outcfg").strpath
+ templater.render_cloudcfg(variant, self.tmpl_path, outfile)
with open(outfile) as stream:
system_cfg = util.load_yaml(stream.read())
default_user_exceptions = {
- 'amazon': 'ec2-user', 'debian': 'ubuntu', 'unknown': 'ubuntu'}
- default_user = system_cfg['system_info']['default_user']['name']
+ "amazon": "ec2-user",
+ "debian": "ubuntu",
+ "unknown": "ubuntu",
+ }
+ default_user = system_cfg["system_info"]["default_user"]["name"]
assert default_user == default_user_exceptions.get(variant, variant)
- @pytest.mark.parametrize('variant,renderers', (
- ('freebsd', ['freebsd']), ('netbsd', ['netbsd']),
- ('openbsd', ['openbsd']), ('ubuntu', ['netplan', 'eni', 'sysconfig']))
+ @pytest.mark.parametrize(
+ "variant,renderers",
+ (
+ ("freebsd", ["freebsd"]),
+ ("netbsd", ["netbsd"]),
+ ("openbsd", ["openbsd"]),
+ ("ubuntu", ["netplan", "eni", "sysconfig"]),
+ ),
)
def test_variant_sets_network_renderer_priority_in_cloud_cfg(
self, variant, renderers, tmpdir
):
- outfile = tmpdir.join('outcfg').strpath
- subp.subp(
- self.cmd + ['--variant', variant, self.tmpl_path, outfile])
+ """Testing parametrized inputs with imported function saves ~0.5s per
+ call versus calling as subp
+ """
+ outfile = tmpdir.join("outcfg").strpath
+ templater.render_cloudcfg(variant, self.tmpl_path, outfile)
with open(outfile) as stream:
system_cfg = util.load_yaml(stream.read())
- assert renderers == system_cfg['system_info']['network']['renderers']
+ assert renderers == system_cfg["system_info"]["network"]["renderers"]
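
Outside of pytest, the rendering these tests exercise can be sketched as a direct call to the `templater.render_cloudcfg` helper they import; the output path here is an assumption for illustration.

    from cloudinit import templater, util

    templater.render_cloudcfg("ubuntu", "config/cloud.cfg.tmpl", "/tmp/cloud.cfg")
    with open("/tmp/cloud.cfg") as stream:
        cfg = util.load_yaml(stream.read())
    assert cfg["system_info"]["distro"] == "ubuntu"
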
diff --git a/tests/unittests/test_reporting.py b/tests/unittests/test_reporting.py
index 9f11fd5c..f6dd96e0 100644
--- a/tests/unittests/test_reporting.py
+++ b/tests/unittests/test_reporting.py
@@ -5,109 +5,133 @@
from unittest import mock
from cloudinit import reporting
-from cloudinit.reporting import events
-from cloudinit.reporting import handlers
-
-from cloudinit.tests.helpers import TestCase
+from cloudinit.reporting import events, handlers
+from tests.unittests.helpers import TestCase
def _fake_registry():
- return mock.Mock(registered_items={'a': mock.MagicMock(),
- 'b': mock.MagicMock()})
+ return mock.Mock(
+ registered_items={"a": mock.MagicMock(), "b": mock.MagicMock()}
+ )
class TestReportStartEvent(TestCase):
-
- @mock.patch('cloudinit.reporting.events.instantiated_handler_registry',
- new_callable=_fake_registry)
+ @mock.patch(
+ "cloudinit.reporting.events.instantiated_handler_registry",
+ new_callable=_fake_registry,
+ )
def test_report_start_event_passes_something_with_as_string_to_handlers(
- self, instantiated_handler_registry):
- event_name, event_description = 'my_test_event', 'my description'
+ self, instantiated_handler_registry
+ ):
+ event_name, event_description = "my_test_event", "my description"
events.report_start_event(event_name, event_description)
- expected_string_representation = ': '.join(
- ['start', event_name, event_description])
- for _, handler in (
- instantiated_handler_registry.registered_items.items()):
+ expected_string_representation = ": ".join(
+ ["start", event_name, event_description]
+ )
+ for (
+ _,
+ handler,
+ ) in instantiated_handler_registry.registered_items.items():
self.assertEqual(1, handler.publish_event.call_count)
event = handler.publish_event.call_args[0][0]
self.assertEqual(expected_string_representation, event.as_string())
class TestReportFinishEvent(TestCase):
-
def _report_finish_event(self, result=events.status.SUCCESS):
- event_name, event_description = 'my_test_event', 'my description'
+ event_name, event_description = "my_test_event", "my description"
events.report_finish_event(
- event_name, event_description, result=result)
+ event_name, event_description, result=result
+ )
return event_name, event_description
def assertHandlersPassedObjectWithAsString(
- self, handlers, expected_as_string):
+ self, handlers, expected_as_string
+ ):
for _, handler in handlers.items():
self.assertEqual(1, handler.publish_event.call_count)
event = handler.publish_event.call_args[0][0]
self.assertEqual(expected_as_string, event.as_string())
- @mock.patch('cloudinit.reporting.events.instantiated_handler_registry',
- new_callable=_fake_registry)
+ @mock.patch(
+ "cloudinit.reporting.events.instantiated_handler_registry",
+ new_callable=_fake_registry,
+ )
def test_report_finish_event_passes_something_with_as_string_to_handlers(
- self, instantiated_handler_registry):
+ self, instantiated_handler_registry
+ ):
event_name, event_description = self._report_finish_event()
- expected_string_representation = ': '.join(
- ['finish', event_name, events.status.SUCCESS,
- event_description])
+ expected_string_representation = ": ".join(
+ ["finish", event_name, events.status.SUCCESS, event_description]
+ )
self.assertHandlersPassedObjectWithAsString(
instantiated_handler_registry.registered_items,
- expected_string_representation)
+ expected_string_representation,
+ )
- @mock.patch('cloudinit.reporting.events.instantiated_handler_registry',
- new_callable=_fake_registry)
+ @mock.patch(
+ "cloudinit.reporting.events.instantiated_handler_registry",
+ new_callable=_fake_registry,
+ )
def test_reporting_successful_finish_has_sensible_string_repr(
- self, instantiated_handler_registry):
+ self, instantiated_handler_registry
+ ):
event_name, event_description = self._report_finish_event(
- result=events.status.SUCCESS)
- expected_string_representation = ': '.join(
- ['finish', event_name, events.status.SUCCESS,
- event_description])
+ result=events.status.SUCCESS
+ )
+ expected_string_representation = ": ".join(
+ ["finish", event_name, events.status.SUCCESS, event_description]
+ )
self.assertHandlersPassedObjectWithAsString(
instantiated_handler_registry.registered_items,
- expected_string_representation)
+ expected_string_representation,
+ )
- @mock.patch('cloudinit.reporting.events.instantiated_handler_registry',
- new_callable=_fake_registry)
+ @mock.patch(
+ "cloudinit.reporting.events.instantiated_handler_registry",
+ new_callable=_fake_registry,
+ )
def test_reporting_unsuccessful_finish_has_sensible_string_repr(
- self, instantiated_handler_registry):
+ self, instantiated_handler_registry
+ ):
event_name, event_description = self._report_finish_event(
- result=events.status.FAIL)
- expected_string_representation = ': '.join(
- ['finish', event_name, events.status.FAIL, event_description])
+ result=events.status.FAIL
+ )
+ expected_string_representation = ": ".join(
+ ["finish", event_name, events.status.FAIL, event_description]
+ )
self.assertHandlersPassedObjectWithAsString(
instantiated_handler_registry.registered_items,
- expected_string_representation)
+ expected_string_representation,
+ )
def test_invalid_result_raises_attribute_error(self):
self.assertRaises(ValueError, self._report_finish_event, ("BOGUS",))
class TestReportingEvent(TestCase):
-
def test_as_string(self):
- event_type, name, description = 'test_type', 'test_name', 'test_desc'
+ event_type, name, description = "test_type", "test_name", "test_desc"
event = events.ReportingEvent(event_type, name, description)
- expected_string_representation = ': '.join(
- [event_type, name, description])
+ expected_string_representation = ": ".join(
+ [event_type, name, description]
+ )
self.assertEqual(expected_string_representation, event.as_string())
def test_as_dict(self):
- event_type, name, desc = 'test_type', 'test_name', 'test_desc'
+ event_type, name, desc = "test_type", "test_name", "test_desc"
event = events.ReportingEvent(event_type, name, desc)
- expected = {'event_type': event_type, 'name': name,
- 'description': desc, 'origin': 'cloudinit'}
+ expected = {
+ "event_type": event_type,
+ "name": name,
+ "description": desc,
+ "origin": "cloudinit",
+ }
# allow for timestamp to differ, but must be present
as_dict = event.as_dict()
- self.assertIn('timestamp', as_dict)
- del as_dict['timestamp']
+ self.assertIn("timestamp", as_dict)
+ del as_dict["timestamp"]
self.assertEqual(expected, as_dict)
@@ -115,145 +139,190 @@ class TestReportingEvent(TestCase):
class TestFinishReportingEvent(TestCase):
def test_as_has_result(self):
result = events.status.SUCCESS
- name, desc = 'test_name', 'test_desc'
+ name, desc = "test_name", "test_desc"
event = events.FinishReportingEvent(name, desc, result)
ret = event.as_dict()
- self.assertTrue('result' in ret)
- self.assertEqual(ret['result'], result)
+ self.assertTrue("result" in ret)
+ self.assertEqual(ret["result"], result)
+ def test_has_result_with_optional_post_files(self):
+ result = events.status.SUCCESS
+ name, desc, files = (
+ "test_name",
+ "test_desc",
+ ["/really/fake/path/install.log"],
+ )
+ event = events.FinishReportingEvent(
+ name, desc, result, post_files=files
+ )
+ ret = event.as_dict()
+ self.assertTrue("result" in ret)
+ self.assertTrue("files" in ret)
+ self.assertEqual(ret["result"], result)
+ posted_install_log = ret["files"][0]
+ self.assertTrue("path" in posted_install_log)
+ self.assertTrue("content" in posted_install_log)
+ self.assertTrue("encoding" in posted_install_log)
+ self.assertEqual(posted_install_log["path"], files[0])
+ self.assertEqual(posted_install_log["encoding"], "base64")
-class TestBaseReportingHandler(TestCase):
+class TestBaseReportingHandler(TestCase):
def test_base_reporting_handler_is_abstract(self):
regexp = r".*abstract.*publish_event.*"
self.assertRaisesRegex(TypeError, regexp, handlers.ReportingHandler)
class TestLogHandler(TestCase):
-
- @mock.patch.object(reporting.handlers.logging, 'getLogger')
+ @mock.patch.object(reporting.handlers.logging, "getLogger")
def test_appropriate_logger_used(self, getLogger):
- event_type, event_name = 'test_type', 'test_name'
- event = events.ReportingEvent(event_type, event_name, 'description')
+ event_type, event_name = "test_type", "test_name"
+ event = events.ReportingEvent(event_type, event_name, "description")
reporting.handlers.LogHandler().publish_event(event)
self.assertEqual(
- [mock.call(
- 'cloudinit.reporting.{0}.{1}'.format(event_type, event_name))],
- getLogger.call_args_list)
-
- @mock.patch.object(reporting.handlers.logging, 'getLogger')
+ [
+ mock.call(
+ "cloudinit.reporting.{0}.{1}".format(
+ event_type, event_name
+ )
+ )
+ ],
+ getLogger.call_args_list,
+ )
+
+ @mock.patch.object(reporting.handlers.logging, "getLogger")
def test_single_log_message_at_info_published(self, getLogger):
- event = events.ReportingEvent('type', 'name', 'description')
+ event = events.ReportingEvent("type", "name", "description")
reporting.handlers.LogHandler().publish_event(event)
self.assertEqual(1, getLogger.return_value.log.call_count)
- @mock.patch.object(reporting.handlers.logging, 'getLogger')
+ @mock.patch.object(reporting.handlers.logging, "getLogger")
def test_log_message_uses_event_as_string(self, getLogger):
- event = events.ReportingEvent('type', 'name', 'description')
+ event = events.ReportingEvent("type", "name", "description")
reporting.handlers.LogHandler(level="INFO").publish_event(event)
- self.assertIn(event.as_string(),
- getLogger.return_value.log.call_args[0][1])
+ self.assertIn(
+ event.as_string(), getLogger.return_value.log.call_args[0][1]
+ )
class TestDefaultRegisteredHandler(TestCase):
-
def test_log_handler_registered_by_default(self):
registered_items = (
- reporting.instantiated_handler_registry.registered_items)
+ reporting.instantiated_handler_registry.registered_items
+ )
for _, item in registered_items.items():
if isinstance(item, reporting.handlers.LogHandler):
break
else:
- self.fail('No reporting LogHandler registered by default.')
+ self.fail("No reporting LogHandler registered by default.")
class TestReportingConfiguration(TestCase):
-
- @mock.patch.object(reporting, 'instantiated_handler_registry')
+ @mock.patch.object(reporting, "instantiated_handler_registry")
def test_empty_configuration_doesnt_add_handlers(
- self, instantiated_handler_registry):
+ self, instantiated_handler_registry
+ ):
reporting.update_configuration({})
self.assertEqual(
- 0, instantiated_handler_registry.register_item.call_count)
+ 0, instantiated_handler_registry.register_item.call_count
+ )
@mock.patch.object(
- reporting, 'instantiated_handler_registry', reporting.DictRegistry())
- @mock.patch.object(reporting, 'available_handlers')
+ reporting, "instantiated_handler_registry", reporting.DictRegistry()
+ )
+ @mock.patch.object(reporting, "available_handlers")
def test_looks_up_handler_by_type_and_adds_it(self, available_handlers):
- handler_type_name = 'test_handler'
+ handler_type_name = "test_handler"
handler_cls = mock.Mock()
available_handlers.registered_items = {handler_type_name: handler_cls}
- handler_name = 'my_test_handler'
+ handler_name = "my_test_handler"
reporting.update_configuration(
- {handler_name: {'type': handler_type_name}})
+ {handler_name: {"type": handler_type_name}}
+ )
self.assertEqual(
{handler_name: handler_cls.return_value},
- reporting.instantiated_handler_registry.registered_items)
+ reporting.instantiated_handler_registry.registered_items,
+ )
@mock.patch.object(
- reporting, 'instantiated_handler_registry', reporting.DictRegistry())
- @mock.patch.object(reporting, 'available_handlers')
+ reporting, "instantiated_handler_registry", reporting.DictRegistry()
+ )
+ @mock.patch.object(reporting, "available_handlers")
def test_uses_non_type_parts_of_config_dict_as_kwargs(
- self, available_handlers):
- handler_type_name = 'test_handler'
+ self, available_handlers
+ ):
+ handler_type_name = "test_handler"
handler_cls = mock.Mock()
available_handlers.registered_items = {handler_type_name: handler_cls}
- extra_kwargs = {'foo': 'bar', 'bar': 'baz'}
+ extra_kwargs = {"foo": "bar", "bar": "baz"}
handler_config = extra_kwargs.copy()
- handler_config.update({'type': handler_type_name})
- handler_name = 'my_test_handler'
+ handler_config.update({"type": handler_type_name})
+ handler_name = "my_test_handler"
reporting.update_configuration({handler_name: handler_config})
self.assertEqual(
handler_cls.return_value,
reporting.instantiated_handler_registry.registered_items[
- handler_name])
- self.assertEqual([mock.call(**extra_kwargs)],
- handler_cls.call_args_list)
+ handler_name
+ ],
+ )
+ self.assertEqual(
+ [mock.call(**extra_kwargs)], handler_cls.call_args_list
+ )
@mock.patch.object(
- reporting, 'instantiated_handler_registry', reporting.DictRegistry())
- @mock.patch.object(reporting, 'available_handlers')
+ reporting, "instantiated_handler_registry", reporting.DictRegistry()
+ )
+ @mock.patch.object(reporting, "available_handlers")
def test_handler_config_not_modified(self, available_handlers):
- handler_type_name = 'test_handler'
+ handler_type_name = "test_handler"
handler_cls = mock.Mock()
available_handlers.registered_items = {handler_type_name: handler_cls}
- handler_config = {'type': handler_type_name, 'foo': 'bar'}
+ handler_config = {"type": handler_type_name, "foo": "bar"}
expected_handler_config = handler_config.copy()
- reporting.update_configuration({'my_test_handler': handler_config})
+ reporting.update_configuration({"my_test_handler": handler_config})
self.assertEqual(expected_handler_config, handler_config)
@mock.patch.object(
- reporting, 'instantiated_handler_registry', reporting.DictRegistry())
- @mock.patch.object(reporting, 'available_handlers')
+ reporting, "instantiated_handler_registry", reporting.DictRegistry()
+ )
+ @mock.patch.object(reporting, "available_handlers")
def test_handlers_removed_if_falseish_specified(self, available_handlers):
- handler_type_name = 'test_handler'
+ handler_type_name = "test_handler"
handler_cls = mock.Mock()
available_handlers.registered_items = {handler_type_name: handler_cls}
- handler_name = 'my_test_handler'
+ handler_name = "my_test_handler"
reporting.update_configuration(
- {handler_name: {'type': handler_type_name}})
+ {handler_name: {"type": handler_type_name}}
+ )
self.assertEqual(
- 1, len(reporting.instantiated_handler_registry.registered_items))
+ 1, len(reporting.instantiated_handler_registry.registered_items)
+ )
reporting.update_configuration({handler_name: None})
self.assertEqual(
- 0, len(reporting.instantiated_handler_registry.registered_items))
+ 0, len(reporting.instantiated_handler_registry.registered_items)
+ )
class TestReportingEventStack(TestCase):
- @mock.patch('cloudinit.reporting.events.report_finish_event')
- @mock.patch('cloudinit.reporting.events.report_start_event')
+ @mock.patch("cloudinit.reporting.events.report_finish_event")
+ @mock.patch("cloudinit.reporting.events.report_start_event")
def test_start_and_finish_success(self, report_start, report_finish):
with events.ReportEventStack(name="myname", description="mydesc"):
pass
self.assertEqual(
- [mock.call('myname', 'mydesc')], report_start.call_args_list)
+ [mock.call("myname", "mydesc")], report_start.call_args_list
+ )
self.assertEqual(
- [mock.call('myname', 'mydesc', events.status.SUCCESS,
- post_files=[])],
- report_finish.call_args_list)
-
- @mock.patch('cloudinit.reporting.events.report_finish_event')
- @mock.patch('cloudinit.reporting.events.report_start_event')
+ [
+ mock.call(
+ "myname", "mydesc", events.status.SUCCESS, post_files=[]
+ )
+ ],
+ report_finish.call_args_list,
+ )
+
+ @mock.patch("cloudinit.reporting.events.report_finish_event")
+ @mock.patch("cloudinit.reporting.events.report_start_event")
def test_finish_exception_defaults_fail(self, report_start, report_finish):
name = "myname"
desc = "mydesc"
@@ -265,31 +334,34 @@ class TestReportingEventStack(TestCase):
self.assertEqual([mock.call(name, desc)], report_start.call_args_list)
self.assertEqual(
[mock.call(name, desc, events.status.FAIL, post_files=[])],
- report_finish.call_args_list)
+ report_finish.call_args_list,
+ )
- @mock.patch('cloudinit.reporting.events.report_finish_event')
- @mock.patch('cloudinit.reporting.events.report_start_event')
+ @mock.patch("cloudinit.reporting.events.report_finish_event")
+ @mock.patch("cloudinit.reporting.events.report_start_event")
def test_result_on_exception_used(self, report_start, report_finish):
name = "myname"
desc = "mydesc"
try:
with events.ReportEventStack(
- name, desc, result_on_exception=events.status.WARN):
+ name, desc, result_on_exception=events.status.WARN
+ ):
raise ValueError("This didnt work")
except ValueError:
pass
self.assertEqual([mock.call(name, desc)], report_start.call_args_list)
self.assertEqual(
[mock.call(name, desc, events.status.WARN, post_files=[])],
- report_finish.call_args_list)
+ report_finish.call_args_list,
+ )
- @mock.patch('cloudinit.reporting.events.report_start_event')
+ @mock.patch("cloudinit.reporting.events.report_start_event")
def test_child_fullname_respects_parent(self, report_start):
parent_name = "topname"
c1_name = "c1name"
c2_name = "c2name"
- c2_expected_fullname = '/'.join([parent_name, c1_name, c2_name])
- c1_expected_fullname = '/'.join([parent_name, c1_name])
+ c2_expected_fullname = "/".join([parent_name, c1_name, c2_name])
+ c1_expected_fullname = "/".join([parent_name, c1_name])
parent = events.ReportEventStack(parent_name, "topdesc")
c1 = events.ReportEventStack(c1_name, "c1desc", parent=parent)
@@ -299,8 +371,8 @@ class TestReportingEventStack(TestCase):
with c2:
report_start.assert_called_with(c2_expected_fullname, "c2desc")
- @mock.patch('cloudinit.reporting.events.report_finish_event')
- @mock.patch('cloudinit.reporting.events.report_start_event')
+ @mock.patch("cloudinit.reporting.events.report_finish_event")
+ @mock.patch("cloudinit.reporting.events.report_start_event")
def test_child_result_bubbles_up(self, report_start, report_finish):
parent = events.ReportEventStack("topname", "topdesc")
child = events.ReportEventStack("c_name", "c_desc", parent=parent)
@@ -309,42 +381,53 @@ class TestReportingEventStack(TestCase):
child.result = events.status.WARN
report_finish.assert_called_with(
- "topname", "topdesc", events.status.WARN, post_files=[])
+ "topname", "topdesc", events.status.WARN, post_files=[]
+ )
- @mock.patch('cloudinit.reporting.events.report_finish_event')
+ @mock.patch("cloudinit.reporting.events.report_finish_event")
def test_message_used_in_finish(self, report_finish):
- with events.ReportEventStack("myname", "mydesc",
- message="mymessage"):
+ with events.ReportEventStack("myname", "mydesc", message="mymessage"):
pass
self.assertEqual(
- [mock.call("myname", "mymessage", events.status.SUCCESS,
- post_files=[])],
- report_finish.call_args_list)
-
- @mock.patch('cloudinit.reporting.events.report_finish_event')
+ [
+ mock.call(
+ "myname", "mymessage", events.status.SUCCESS, post_files=[]
+ )
+ ],
+ report_finish.call_args_list,
+ )
+
+ @mock.patch("cloudinit.reporting.events.report_finish_event")
def test_message_updatable(self, report_finish):
with events.ReportEventStack("myname", "mydesc") as c:
c.message = "all good"
self.assertEqual(
- [mock.call("myname", "all good", events.status.SUCCESS,
- post_files=[])],
- report_finish.call_args_list)
-
- @mock.patch('cloudinit.reporting.events.report_start_event')
- @mock.patch('cloudinit.reporting.events.report_finish_event')
+ [
+ mock.call(
+ "myname", "all good", events.status.SUCCESS, post_files=[]
+ )
+ ],
+ report_finish.call_args_list,
+ )
+
+ @mock.patch("cloudinit.reporting.events.report_start_event")
+ @mock.patch("cloudinit.reporting.events.report_finish_event")
def test_reporting_disabled_does_not_report_events(
- self, report_start, report_finish):
+ self, report_start, report_finish
+ ):
with events.ReportEventStack("a", "b", reporting_enabled=False):
pass
self.assertEqual(report_start.call_count, 0)
self.assertEqual(report_finish.call_count, 0)
- @mock.patch('cloudinit.reporting.events.report_start_event')
- @mock.patch('cloudinit.reporting.events.report_finish_event')
+ @mock.patch("cloudinit.reporting.events.report_start_event")
+ @mock.patch("cloudinit.reporting.events.report_finish_event")
def test_reporting_child_default_to_parent(
- self, report_start, report_finish):
+ self, report_start, report_finish
+ ):
parent = events.ReportEventStack(
- "pname", "pdesc", reporting_enabled=False)
+ "pname", "pdesc", reporting_enabled=False
+ )
child = events.ReportEventStack("cname", "cdesc", parent=parent)
with parent:
with child:
@@ -353,8 +436,9 @@ class TestReportingEventStack(TestCase):
self.assertEqual(report_finish.call_count, 0)
def test_reporting_event_has_sane_repr(self):
- myrep = events.ReportEventStack("fooname", "foodesc",
- reporting_enabled=True).__repr__()
+ myrep = events.ReportEventStack(
+ "fooname", "foodesc", reporting_enabled=True
+ ).__repr__()
self.assertIn("fooname", myrep)
self.assertIn("foodesc", myrep)
self.assertIn("True", myrep)
@@ -368,4 +452,5 @@ class TestStatusAccess(TestCase):
def test_invalid_status_access_raises_value_error(self):
self.assertRaises(AttributeError, getattr, events.status, "BOGUS")
+
# vi: ts=4 expandtab
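
For orientation, the ReportEventStack pattern these tests cover, as a minimal sketch: a start event is reported on entry, a finish event on exit, with `result_on_exception` controlling the status when the body raises and `message` overriding the finish description.

    from cloudinit.reporting import events

    with events.ReportEventStack(
        "my-stage", "doing some work", result_on_exception=events.status.WARN
    ) as stack:
        stack.message = "all good"  # used as the finish event's description
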
diff --git a/tests/unittests/test_reporting_hyperv.py b/tests/unittests/test_reporting_hyperv.py
index 9324b78d..35ab0c58 100644
--- a/tests/unittests/test_reporting_hyperv.py
+++ b/tests/unittests/test_reporting_hyperv.py
@@ -1,27 +1,25 @@
# This file is part of cloud-init. See LICENSE file for license information.
import base64
-import zlib
-
-from cloudinit.reporting import events, instantiated_handler_registry
-from cloudinit.reporting.handlers import HyperVKvpReportingHandler, LogHandler
-
import json
import os
+import re
import struct
import time
-import re
+import zlib
from unittest import mock
from cloudinit import util
-from cloudinit.tests.helpers import CiTestCase
+from cloudinit.reporting import events, instantiated_handler_registry
+from cloudinit.reporting.handlers import HyperVKvpReportingHandler, LogHandler
from cloudinit.sources.helpers import azure
+from tests.unittests.helpers import CiTestCase
class TestKvpEncoding(CiTestCase):
def test_encode_decode(self):
- kvp = {'key': 'key1', 'value': 'value1'}
+ kvp = {"key": "key1", "value": "value1"}
kvp_reporting = HyperVKvpReportingHandler()
- data = kvp_reporting._encode_kvp_item(kvp['key'], kvp['value'])
+ data = kvp_reporting._encode_kvp_item(kvp["key"], kvp["value"])
self.assertEqual(len(data), kvp_reporting.HV_KVP_RECORD_SIZE)
decoded_kvp = kvp_reporting._decode_kvp_item(data)
self.assertEqual(kvp, decoded_kvp)
@@ -30,71 +28,72 @@ class TestKvpEncoding(CiTestCase):
class TextKvpReporter(CiTestCase):
def setUp(self):
super(TextKvpReporter, self).setUp()
- self.tmp_file_path = self.tmp_path('kvp_pool_file')
+ self.tmp_file_path = self.tmp_path("kvp_pool_file")
util.ensure_file(self.tmp_file_path)
def test_events_with_higher_incarnation_not_over_written(self):
- reporter = HyperVKvpReportingHandler(
- kvp_file_path=self.tmp_file_path)
+ reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
self.assertEqual(0, len(list(reporter._iterate_kvps(0))))
reporter.publish_event(
- events.ReportingEvent('foo', 'name1', 'description'))
+ events.ReportingEvent("foo", "name1", "description")
+ )
reporter.publish_event(
- events.ReportingEvent('foo', 'name2', 'description'))
+ events.ReportingEvent("foo", "name2", "description")
+ )
reporter.q.join()
self.assertEqual(2, len(list(reporter._iterate_kvps(0))))
- reporter3 = HyperVKvpReportingHandler(
- kvp_file_path=self.tmp_file_path)
+ reporter3 = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
reporter3.incarnation_no = reporter.incarnation_no - 1
reporter3.publish_event(
- events.ReportingEvent('foo', 'name3', 'description'))
+ events.ReportingEvent("foo", "name3", "description")
+ )
reporter3.q.join()
self.assertEqual(3, len(list(reporter3._iterate_kvps(0))))
def test_finish_event_result_is_logged(self):
- reporter = HyperVKvpReportingHandler(
- kvp_file_path=self.tmp_file_path)
+ reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
reporter.publish_event(
- events.FinishReportingEvent('name2', 'description1',
- result=events.status.FAIL))
+ events.FinishReportingEvent(
+ "name2", "description1", result=events.status.FAIL
+ )
+ )
reporter.q.join()
- self.assertIn('FAIL', list(reporter._iterate_kvps(0))[0]['value'])
+ self.assertIn("FAIL", list(reporter._iterate_kvps(0))[0]["value"])
def test_file_operation_issue(self):
os.remove(self.tmp_file_path)
- reporter = HyperVKvpReportingHandler(
- kvp_file_path=self.tmp_file_path)
+ reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
reporter.publish_event(
- events.FinishReportingEvent('name2', 'description1',
- result=events.status.FAIL))
+ events.FinishReportingEvent(
+ "name2", "description1", result=events.status.FAIL
+ )
+ )
reporter.q.join()
def test_event_very_long(self):
- reporter = HyperVKvpReportingHandler(
- kvp_file_path=self.tmp_file_path)
- description = 'ab' * reporter.HV_KVP_AZURE_MAX_VALUE_SIZE
+ reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
+ description = "ab" * reporter.HV_KVP_AZURE_MAX_VALUE_SIZE
long_event = events.FinishReportingEvent(
- 'event_name',
- description,
- result=events.status.FAIL)
+ "event_name", description, result=events.status.FAIL
+ )
reporter.publish_event(long_event)
reporter.q.join()
kvps = list(reporter._iterate_kvps(0))
self.assertEqual(3, len(kvps))
# restore from the kvp to verify the content is all there
- full_description = ''
+ full_description = ""
for i in range(len(kvps)):
- msg_slice = json.loads(kvps[i]['value'])
- self.assertEqual(msg_slice['msg_i'], i)
- full_description += msg_slice['msg']
+ msg_slice = json.loads(kvps[i]["value"])
+ self.assertEqual(msg_slice["msg_i"], i)
+ full_description += msg_slice["msg"]
self.assertEqual(description, full_description)
def test_not_truncate_kvp_file_modified_after_boot(self):
with open(self.tmp_file_path, "wb+") as f:
- kvp = {'key': 'key1', 'value': 'value1'}
+ kvp = {"key": "key1", "value": "value1"}
data = struct.pack(
"%ds%ds"
% (
@@ -118,11 +117,16 @@ class TextKvpReporter(CiTestCase):
def test_truncate_stale_kvp_file(self):
with open(self.tmp_file_path, "wb+") as f:
- kvp = {'key': 'key1', 'value': 'value1'}
- data = (struct.pack("%ds%ds" % (
- HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_KEY_SIZE,
- HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_VALUE_SIZE),
- kvp['key'].encode('utf-8'), kvp['value'].encode('utf-8')))
+ kvp = {"key": "key1", "value": "value1"}
+ data = struct.pack(
+ "%ds%ds"
+ % (
+ HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_KEY_SIZE,
+ HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_VALUE_SIZE,
+ ),
+ kvp["key"].encode("utf-8"),
+ kvp["value"].encode("utf-8"),
+ )
f.write(data)
# set the time ways back to make it look like
@@ -137,8 +141,8 @@ class TextKvpReporter(CiTestCase):
kvps = list(reporter._iterate_kvps(0))
self.assertEqual(0, len(kvps))
- @mock.patch('cloudinit.distros.uses_systemd')
- @mock.patch('cloudinit.subp.subp')
+ @mock.patch("cloudinit.distros.uses_systemd")
+ @mock.patch("cloudinit.subp.subp")
def test_get_boot_telemetry(self, m_subp, m_sysd):
reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
datetime_pattern = (
@@ -149,8 +153,9 @@ class TextKvpReporter(CiTestCase):
# get_boot_telemetry makes two subp calls to systemctl. We provide
# a list of values that the subp calls should return
m_subp.side_effect = [
- ('UserspaceTimestampMonotonic=1844838', ''),
- ('InactiveExitTimestampMonotonic=3068203', '')]
+ ("UserspaceTimestampMonotonic=1844838", ""),
+ ("InactiveExitTimestampMonotonic=3068203", ""),
+ ]
m_sysd.return_value = True
reporter.publish_event(azure.get_boot_telemetry())
@@ -158,15 +163,13 @@ class TextKvpReporter(CiTestCase):
kvps = list(reporter._iterate_kvps(0))
self.assertEqual(1, len(kvps))
- evt_msg = kvps[0]['value']
+ evt_msg = kvps[0]["value"]
if not re.search("kernel_start=" + datetime_pattern, evt_msg):
raise AssertionError("missing kernel_start timestamp")
if not re.search("user_start=" + datetime_pattern, evt_msg):
raise AssertionError("missing user_start timestamp")
- if not re.search("cloudinit_activation=" + datetime_pattern,
- evt_msg):
- raise AssertionError(
- "missing cloudinit_activation timestamp")
+ if not re.search("cloudinit_activation=" + datetime_pattern, evt_msg):
+ raise AssertionError("missing cloudinit_activation timestamp")
def test_get_system_info(self):
reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
@@ -176,7 +179,7 @@ class TextKvpReporter(CiTestCase):
reporter.q.join()
kvps = list(reporter._iterate_kvps(0))
self.assertEqual(1, len(kvps))
- evt_msg = kvps[0]['value']
+ evt_msg = kvps[0]["value"]
# the most important information is cloudinit version,
# kernel_version, and the distro variant. It is ok if
@@ -191,12 +194,11 @@ class TextKvpReporter(CiTestCase):
def test_report_diagnostic_event_without_logger_func(self):
reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
diagnostic_msg = "test_diagnostic"
- reporter.publish_event(
- azure.report_diagnostic_event(diagnostic_msg))
+ reporter.publish_event(azure.report_diagnostic_event(diagnostic_msg))
reporter.q.join()
kvps = list(reporter._iterate_kvps(0))
self.assertEqual(1, len(kvps))
- evt_msg = kvps[0]['value']
+ evt_msg = kvps[0]["value"]
if diagnostic_msg not in evt_msg:
raise AssertionError("missing expected diagnostic message")
@@ -206,12 +208,14 @@ class TextKvpReporter(CiTestCase):
logger_func = mock.MagicMock()
diagnostic_msg = "test_diagnostic"
reporter.publish_event(
- azure.report_diagnostic_event(diagnostic_msg,
- logger_func=logger_func))
+ azure.report_diagnostic_event(
+ diagnostic_msg, logger_func=logger_func
+ )
+ )
reporter.q.join()
kvps = list(reporter._iterate_kvps(0))
self.assertEqual(1, len(kvps))
- evt_msg = kvps[0]['value']
+ evt_msg = kvps[0]["value"]
if diagnostic_msg not in evt_msg:
raise AssertionError("missing expected diagnostic message")
@@ -221,18 +225,18 @@ class TextKvpReporter(CiTestCase):
reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
try:
instantiated_handler_registry.register_item("telemetry", reporter)
- event_desc = b'test_compressed'
- azure.report_compressed_event(
- "compressed event", event_desc)
+ event_desc = b"test_compressed"
+ azure.report_compressed_event("compressed event", event_desc)
self.validate_compressed_kvps(reporter, 1, [event_desc])
finally:
- instantiated_handler_registry.unregister_item("telemetry",
- force=False)
+ instantiated_handler_registry.unregister_item(
+ "telemetry", force=False
+ )
- @mock.patch('cloudinit.sources.helpers.azure.report_compressed_event')
- @mock.patch('cloudinit.sources.helpers.azure.report_diagnostic_event')
- @mock.patch('cloudinit.subp.subp')
+ @mock.patch("cloudinit.sources.helpers.azure.report_compressed_event")
+ @mock.patch("cloudinit.sources.helpers.azure.report_diagnostic_event")
+ @mock.patch("cloudinit.subp.subp")
def test_push_log_to_kvp_exception_handling(self, m_subp, m_diag, m_com):
reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
try:
@@ -240,7 +244,8 @@ class TextKvpReporter(CiTestCase):
log_file = self.tmp_path("cloud-init.log")
azure.MAX_LOG_TO_KVP_LENGTH = 100
azure.LOG_PUSHED_TO_KVP_INDEX_FILE = self.tmp_path(
- 'log_pushed_to_kvp')
+ "log_pushed_to_kvp"
+ )
with open(log_file, "w") as f:
log_content = "A" * 50 + "B" * 100
f.write(log_content)
@@ -251,11 +256,12 @@ class TextKvpReporter(CiTestCase):
# exceptions will trigger diagnostic reporting calls
self.assertEqual(m_diag.call_count, 3)
finally:
- instantiated_handler_registry.unregister_item("telemetry",
- force=False)
+ instantiated_handler_registry.unregister_item(
+ "telemetry", force=False
+ )
- @mock.patch('cloudinit.subp.subp')
- @mock.patch.object(LogHandler, 'publish_event')
+ @mock.patch("cloudinit.subp.subp")
+ @mock.patch.object(LogHandler, "publish_event")
def test_push_log_to_kvp(self, publish_event, m_subp):
reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
try:
@@ -263,7 +269,8 @@ class TextKvpReporter(CiTestCase):
log_file = self.tmp_path("cloud-init.log")
azure.MAX_LOG_TO_KVP_LENGTH = 100
azure.LOG_PUSHED_TO_KVP_INDEX_FILE = self.tmp_path(
- 'log_pushed_to_kvp')
+ "log_pushed_to_kvp"
+ )
with open(log_file, "w") as f:
log_content = "A" * 50 + "B" * 100
f.write(log_content)
@@ -275,20 +282,25 @@ class TextKvpReporter(CiTestCase):
azure.push_log_to_kvp(log_file)
# make sure dmesg is called every time
- m_subp.assert_called_with(
- ['dmesg'], capture=True, decode=False)
+ m_subp.assert_called_with(["dmesg"], capture=True, decode=False)
for call_arg in publish_event.call_args_list:
event = call_arg[0][0]
self.assertNotEqual(
- event.event_type, azure.COMPRESSED_EVENT_TYPE)
+ event.event_type, azure.COMPRESSED_EVENT_TYPE
+ )
self.validate_compressed_kvps(
- reporter, 2,
- [log_content[-azure.MAX_LOG_TO_KVP_LENGTH:].encode(),
- extra_content.encode()])
+ reporter,
+ 2,
+ [
+ log_content[-azure.MAX_LOG_TO_KVP_LENGTH :].encode(),
+ extra_content.encode(),
+ ],
+ )
finally:
- instantiated_handler_registry.unregister_item("telemetry",
- force=False)
+ instantiated_handler_registry.unregister_item(
+ "telemetry", force=False
+ )
def validate_compressed_kvps(self, reporter, count, values):
reporter.q.join()
@@ -296,7 +308,7 @@ class TextKvpReporter(CiTestCase):
compressed_count = 0
for i in range(len(kvps)):
kvp = kvps[i]
- kvp_value = kvp['value']
+ kvp_value = kvp["value"]
kvp_value_json = json.loads(kvp_value)
evt_msg = kvp_value_json["msg"]
evt_type = kvp_value_json["type"]
@@ -305,7 +317,8 @@ class TextKvpReporter(CiTestCase):
evt_msg_json = json.loads(evt_msg)
evt_encoding = evt_msg_json["encoding"]
evt_data = zlib.decompress(
- base64.decodebytes(evt_msg_json["data"].encode("ascii")))
+ base64.decodebytes(evt_msg_json["data"].encode("ascii"))
+ )
self.assertLess(compressed_count, len(values))
self.assertEqual(evt_data, values[compressed_count])
@@ -316,17 +329,21 @@ class TextKvpReporter(CiTestCase):
def test_unique_kvp_key(self):
reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
evt1 = events.ReportingEvent(
- "event_type", 'event_message',
- "event_description")
+ "event_type", "event_message", "event_description"
+ )
reporter.publish_event(evt1)
evt2 = events.ReportingEvent(
- "event_type", 'event_message',
- "event_description", timestamp=evt1.timestamp + 1)
+ "event_type",
+ "event_message",
+ "event_description",
+ timestamp=evt1.timestamp + 1,
+ )
reporter.publish_event(evt2)
reporter.q.join()
kvps = list(reporter._iterate_kvps(0))
self.assertEqual(2, len(kvps))
- self.assertNotEqual(kvps[0]["key"], kvps[1]["key"],
- "duplicate keys for KVP entries")
+ self.assertNotEqual(
+ kvps[0]["key"], kvps[1]["key"], "duplicate keys for KVP entries"
+ )
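
As test_event_very_long above shows, the Hyper-V KVP handler splits an oversized event across several KVP slices, each carrying a JSON value with `msg_i` and `msg`. A small sketch of reassembling such a description from the iterated KVPs (the helper name is invented for this illustration):

    import json

    def reassemble_description(kvps):
        # Each slice is {"msg_i": <index>, "msg": <fragment>, ...}; order by
        # index and concatenate, as the test's verification loop does.
        slices = sorted(
            (json.loads(kvp["value"]) for kvp in kvps),
            key=lambda s: s["msg_i"],
        )
        return "".join(s["msg"] for s in slices)
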
diff --git a/tests/unittests/test_rh_subscription.py b/tests/unittests/test_rh_subscription.py
deleted file mode 100644
index 53d3cd5a..00000000
--- a/tests/unittests/test_rh_subscription.py
+++ /dev/null
@@ -1,234 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""Tests for registering RHEL subscription via rh_subscription."""
-
-import copy
-import logging
-
-from cloudinit.config import cc_rh_subscription
-from cloudinit import subp
-
-from cloudinit.tests.helpers import CiTestCase, mock
-
-SUBMGR = cc_rh_subscription.SubscriptionManager
-SUB_MAN_CLI = 'cloudinit.config.cc_rh_subscription._sub_man_cli'
-
-
-@mock.patch(SUB_MAN_CLI)
-class GoodTests(CiTestCase):
- with_logs = True
-
- def setUp(self):
- super(GoodTests, self).setUp()
- self.name = "cc_rh_subscription"
- self.cloud_init = None
- self.log = logging.getLogger("good_tests")
- self.args = []
- self.handle = cc_rh_subscription.handle
-
- self.config = {'rh_subscription':
- {'username': 'scooby@do.com',
- 'password': 'scooby-snacks'
- }}
- self.config_full = {'rh_subscription':
- {'username': 'scooby@do.com',
- 'password': 'scooby-snacks',
- 'auto-attach': True,
- 'service-level': 'self-support',
- 'add-pool': ['pool1', 'pool2', 'pool3'],
- 'enable-repo': ['repo1', 'repo2', 'repo3'],
- 'disable-repo': ['repo4', 'repo5']
- }}
-
- def test_already_registered(self, m_sman_cli):
- '''
- Emulates a system that is already registered. Ensure it gets
- a non-ProcessExecution error from is_registered()
- '''
- self.handle(self.name, self.config, self.cloud_init,
- self.log, self.args)
- self.assertEqual(m_sman_cli.call_count, 1)
- self.assertIn('System is already registered', self.logs.getvalue())
-
- def test_simple_registration(self, m_sman_cli):
- '''
- Simple registration with username and password
- '''
- reg = "The system has been registered with ID:" \
- " 12345678-abde-abcde-1234-1234567890abc"
- m_sman_cli.side_effect = [subp.ProcessExecutionError, (reg, 'bar')]
- self.handle(self.name, self.config, self.cloud_init,
- self.log, self.args)
- self.assertIn(mock.call(['identity']), m_sman_cli.call_args_list)
- self.assertIn(mock.call(['register', '--username=scooby@do.com',
- '--password=scooby-snacks'],
- logstring_val=True),
- m_sman_cli.call_args_list)
- self.assertIn('rh_subscription plugin completed successfully',
- self.logs.getvalue())
- self.assertEqual(m_sman_cli.call_count, 2)
-
- @mock.patch.object(cc_rh_subscription.SubscriptionManager, "_getRepos")
- def test_update_repos_disable_with_none(self, m_get_repos, m_sman_cli):
- cfg = copy.deepcopy(self.config)
- m_get_repos.return_value = ([], ['repo1'])
- cfg['rh_subscription'].update(
- {'enable-repo': ['repo1'], 'disable-repo': None})
- mysm = cc_rh_subscription.SubscriptionManager(cfg)
- self.assertEqual(True, mysm.update_repos())
- m_get_repos.assert_called_with()
- self.assertEqual(m_sman_cli.call_args_list,
- [mock.call(['repos', '--enable=repo1'])])
-
- def test_full_registration(self, m_sman_cli):
- '''
- Registration with auto-attach, service-level, adding pools,
- and enabling and disabling yum repos
- '''
- call_lists = []
- call_lists.append(['attach', '--pool=pool1', '--pool=pool3'])
- call_lists.append(['repos', '--disable=repo5', '--enable=repo2',
- '--enable=repo3'])
- call_lists.append(['attach', '--auto', '--servicelevel=self-support'])
- reg = "The system has been registered with ID:" \
- " 12345678-abde-abcde-1234-1234567890abc"
- m_sman_cli.side_effect = [
- subp.ProcessExecutionError,
- (reg, 'bar'),
- ('Service level set to: self-support', ''),
- ('pool1\npool3\n', ''), ('pool2\n', ''), ('', ''),
- ('Repo ID: repo1\nRepo ID: repo5\n', ''),
- ('Repo ID: repo2\nRepo ID: repo3\nRepo ID: repo4', ''),
- ('', '')]
- self.handle(self.name, self.config_full, self.cloud_init,
- self.log, self.args)
- self.assertEqual(m_sman_cli.call_count, 9)
- for call in call_lists:
- self.assertIn(mock.call(call), m_sman_cli.call_args_list)
- self.assertIn("rh_subscription plugin completed successfully",
- self.logs.getvalue())
-
-
-@mock.patch(SUB_MAN_CLI)
-class TestBadInput(CiTestCase):
- with_logs = True
- name = "cc_rh_subscription"
- cloud_init = None
- log = logging.getLogger("bad_tests")
- args = []
- SM = cc_rh_subscription.SubscriptionManager
- reg = "The system has been registered with ID:" \
- " 12345678-abde-abcde-1234-1234567890abc"
-
- config_no_password = {'rh_subscription':
- {'username': 'scooby@do.com'
- }}
-
- config_no_key = {'rh_subscription':
- {'activation-key': '1234abcde',
- }}
-
- config_service = {'rh_subscription':
- {'username': 'scooby@do.com',
- 'password': 'scooby-snacks',
- 'service-level': 'self-support'
- }}
-
- config_badpool = {'rh_subscription':
- {'username': 'scooby@do.com',
- 'password': 'scooby-snacks',
- 'add-pool': 'not_a_list'
- }}
- config_badrepo = {'rh_subscription':
- {'username': 'scooby@do.com',
- 'password': 'scooby-snacks',
- 'enable-repo': 'not_a_list'
- }}
- config_badkey = {'rh_subscription':
- {'activation-key': 'abcdef1234',
- 'fookey': 'bar',
- 'org': '123',
- }}
-
- def setUp(self):
- super(TestBadInput, self).setUp()
- self.handle = cc_rh_subscription.handle
-
- def assert_logged_warnings(self, warnings):
- logs = self.logs.getvalue()
- missing = [w for w in warnings if "WARNING: " + w not in logs]
- self.assertEqual([], missing, "Missing expected warnings.")
-
- def test_no_password(self, m_sman_cli):
- '''Attempt to register without the password key/value.'''
- m_sman_cli.side_effect = [subp.ProcessExecutionError,
- (self.reg, 'bar')]
- self.handle(self.name, self.config_no_password, self.cloud_init,
- self.log, self.args)
- self.assertEqual(m_sman_cli.call_count, 0)
-
- def test_no_org(self, m_sman_cli):
- '''Attempt to register without the org key/value.'''
- m_sman_cli.side_effect = [subp.ProcessExecutionError]
- self.handle(self.name, self.config_no_key, self.cloud_init,
- self.log, self.args)
- m_sman_cli.assert_called_with(['identity'])
- self.assertEqual(m_sman_cli.call_count, 1)
- self.assert_logged_warnings((
- 'Unable to register system due to incomplete information.',
- 'Use either activationkey and org *or* userid and password',
- 'Registration failed or did not run completely',
- 'rh_subscription plugin did not complete successfully'))
-
- def test_service_level_without_auto(self, m_sman_cli):
- '''Attempt to register using service-level without auto-attach key.'''
- m_sman_cli.side_effect = [subp.ProcessExecutionError,
- (self.reg, 'bar')]
- self.handle(self.name, self.config_service, self.cloud_init,
- self.log, self.args)
- self.assertEqual(m_sman_cli.call_count, 1)
- self.assert_logged_warnings((
- 'The service-level key must be used in conjunction with ',
- 'rh_subscription plugin did not complete successfully'))
-
- def test_pool_not_a_list(self, m_sman_cli):
- '''
- Register with pools that are not in the format of a list
- '''
- m_sman_cli.side_effect = [subp.ProcessExecutionError,
- (self.reg, 'bar')]
- self.handle(self.name, self.config_badpool, self.cloud_init,
- self.log, self.args)
- self.assertEqual(m_sman_cli.call_count, 2)
- self.assert_logged_warnings((
- 'Pools must in the format of a list',
- 'rh_subscription plugin did not complete successfully'))
-
- def test_repo_not_a_list(self, m_sman_cli):
- '''
- Register with repos that are not in the format of a list
- '''
- m_sman_cli.side_effect = [subp.ProcessExecutionError,
- (self.reg, 'bar')]
- self.handle(self.name, self.config_badrepo, self.cloud_init,
- self.log, self.args)
- self.assertEqual(m_sman_cli.call_count, 2)
- self.assert_logged_warnings((
- 'Repo IDs must in the format of a list.',
- 'Unable to add or remove repos',
- 'rh_subscription plugin did not complete successfully'))
-
- def test_bad_key_value(self, m_sman_cli):
- '''
- Attempt to register with a key that we don't know
- '''
- m_sman_cli.side_effect = [subp.ProcessExecutionError,
- (self.reg, 'bar')]
- self.handle(self.name, self.config_badkey, self.cloud_init,
- self.log, self.args)
- self.assertEqual(m_sman_cli.call_count, 1)
- self.assert_logged_warnings((
- 'fookey is not a valid key for rh_subscription. Valid keys are:',
- 'rh_subscription plugin did not complete successfully'))
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_runs/test_merge_run.py b/tests/unittests/test_runs/test_merge_run.py
deleted file mode 100644
index ff27a280..00000000
--- a/tests/unittests/test_runs/test_merge_run.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import os
-import shutil
-import tempfile
-
-from cloudinit.tests import helpers
-
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import safeyaml
-from cloudinit import stages
-from cloudinit import util
-
-
-class TestMergeRun(helpers.FilesystemMockingTestCase):
- def _patchIn(self, root):
- self.patchOS(root)
- self.patchUtils(root)
-
- def test_none_ds(self):
- new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, new_root)
- self.replicateTestRoot('simple_ubuntu', new_root)
- cfg = {
- 'datasource_list': ['None'],
- 'cloud_init_modules': ['write-files'],
- 'system_info': {'paths': {'run_dir': new_root}}
- }
- ud = helpers.readResource('user_data.1.txt')
- cloud_cfg = safeyaml.dumps(cfg)
- util.ensure_dir(os.path.join(new_root, 'etc', 'cloud'))
- util.write_file(os.path.join(new_root, 'etc',
- 'cloud', 'cloud.cfg'), cloud_cfg)
- self._patchIn(new_root)
-
- # Now start verifying whats created
- initer = stages.Init()
- initer.read_cfg()
- initer.initialize()
- initer.fetch()
- initer.datasource.userdata_raw = ud
- initer.instancify()
- initer.update()
- initer.cloudify().run('consume_data',
- initer.consume_data,
- args=[PER_INSTANCE],
- freq=PER_INSTANCE)
- mirrors = initer.distro.get_option('package_mirrors')
- self.assertEqual(1, len(mirrors))
- mirror = mirrors[0]
- self.assertEqual(mirror['arches'], ['i386', 'amd64', 'blah'])
- mods = stages.Modules(initer)
- (which_ran, failures) = mods.run_section('cloud_init_modules')
- self.assertTrue(len(failures) == 0)
- self.assertTrue(os.path.exists('/etc/blah.ini'))
- self.assertIn('write-files', which_ran)
- contents = util.load_file('/etc/blah.ini')
- self.assertEqual(contents, 'blah')
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_simpletable.py b/tests/unittests/test_simpletable.py
new file mode 100644
index 00000000..ee7eb0b4
--- /dev/null
+++ b/tests/unittests/test_simpletable.py
@@ -0,0 +1,119 @@
+# Copyright (C) 2017 Amazon.com, Inc. or its affiliates
+#
+# Author: Andrew Jorgensen <ajorgens@amazon.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+"""Tests that SimpleTable works just like PrettyTable for cloud-init.
+
+Not all possible PrettyTable cases are tested because we're not trying to
+reimplement the entire library, only the minimal parts we actually use.
+"""
+
+from cloudinit.simpletable import SimpleTable
+from tests.unittests.helpers import CiTestCase
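+
+# Usage sketch (hypothetical values, not used by the assertions below):
+# SimpleTable mirrors the small subset of the PrettyTable API that cloud-init
+# relies on: construct with a tuple of field names, append rows with
+# add_row(), and render with str() or get_string().
+#
+#     table = SimpleTable(("Device", "Up"))
+#     table.add_row(("ens3", True))
+#     print(table)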
+
+# Examples rendered by cloud-init using PrettyTable
+NET_DEVICE_FIELDS = ("Device", "Up", "Address", "Mask", "Scope", "Hw-Address")
+NET_DEVICE_ROWS = (
+ ("ens3", True, "172.31.4.203", "255.255.240.0", ".", "0a:1f:07:15:98:70"),
+ (
+ "ens3",
+ True,
+ "fe80::81f:7ff:fe15:9870/64",
+ ".",
+ "link",
+ "0a:1f:07:15:98:70",
+ ),
+ ("lo", True, "127.0.0.1", "255.0.0.0", ".", "."),
+ ("lo", True, "::1/128", ".", "host", "."),
+)
+NET_DEVICE_TABLE = """\
++--------+------+----------------------------+---------------+-------+-------------------+
+| Device | Up | Address | Mask | Scope | Hw-Address |
++--------+------+----------------------------+---------------+-------+-------------------+
+| ens3 | True | 172.31.4.203 | 255.255.240.0 | . | 0a:1f:07:15:98:70 |
+| ens3 | True | fe80::81f:7ff:fe15:9870/64 | . | link | 0a:1f:07:15:98:70 |
+| lo | True | 127.0.0.1 | 255.0.0.0 | . | . |
+| lo | True | ::1/128 | . | host | . |
++--------+------+----------------------------+---------------+-------+-------------------+""" # noqa: E501
+ROUTE_IPV4_FIELDS = (
+ "Route",
+ "Destination",
+ "Gateway",
+ "Genmask",
+ "Interface",
+ "Flags",
+)
+ROUTE_IPV4_ROWS = (
+ ("0", "0.0.0.0", "172.31.0.1", "0.0.0.0", "ens3", "UG"),
+ ("1", "169.254.0.0", "0.0.0.0", "255.255.0.0", "ens3", "U"),
+ ("2", "172.31.0.0", "0.0.0.0", "255.255.240.0", "ens3", "U"),
+)
+ROUTE_IPV4_TABLE = """\
++-------+-------------+------------+---------------+-----------+-------+
+| Route | Destination | Gateway | Genmask | Interface | Flags |
++-------+-------------+------------+---------------+-----------+-------+
+| 0 | 0.0.0.0 | 172.31.0.1 | 0.0.0.0 | ens3 | UG |
+| 1 | 169.254.0.0 | 0.0.0.0 | 255.255.0.0 | ens3 | U |
+| 2 | 172.31.0.0 | 0.0.0.0 | 255.255.240.0 | ens3 | U |
++-------+-------------+------------+---------------+-----------+-------+"""
+
+AUTHORIZED_KEYS_FIELDS = ("Keytype", "Fingerprint (md5)", "Options", "Comment")
+AUTHORIZED_KEYS_ROWS = (
+ (
+ "ssh-rsa",
+ "24:c7:41:49:47:12:31:a0:de:6f:62:79:9b:13:06:36",
+ "-",
+ "ajorgens",
+ ),
+)
+AUTHORIZED_KEYS_TABLE = """\
++---------+-------------------------------------------------+---------+----------+
+| Keytype | Fingerprint (md5) | Options | Comment |
++---------+-------------------------------------------------+---------+----------+
+| ssh-rsa | 24:c7:41:49:47:12:31:a0:de:6f:62:79:9b:13:06:36 | - | ajorgens |
++---------+-------------------------------------------------+---------+----------+""" # noqa: E501
+
+# from prettytable import PrettyTable
+# pt = PrettyTable(('HEADER',))
+# print(pt)
+NO_ROWS_FIELDS = ("HEADER",)
+NO_ROWS_TABLE = """\
++--------+
+| HEADER |
++--------+
++--------+"""
+
+
+class TestSimpleTable(CiTestCase):
+ def test_no_rows(self):
+ """An empty table is rendered as PrettyTable would have done it."""
+ table = SimpleTable(NO_ROWS_FIELDS)
+ self.assertEqual(str(table), NO_ROWS_TABLE)
+
+ def test_net_dev(self):
+ """Net device info is rendered as it was with PrettyTable."""
+ table = SimpleTable(NET_DEVICE_FIELDS)
+ for row in NET_DEVICE_ROWS:
+ table.add_row(row)
+ self.assertEqual(str(table), NET_DEVICE_TABLE)
+
+ def test_route_ipv4(self):
+ """Route IPv4 info is rendered as it was with PrettyTable."""
+ table = SimpleTable(ROUTE_IPV4_FIELDS)
+ for row in ROUTE_IPV4_ROWS:
+ table.add_row(row)
+ self.assertEqual(str(table), ROUTE_IPV4_TABLE)
+
+ def test_authorized_keys(self):
+ """SSH authorized keys are rendered as they were with PrettyTable."""
+ table = SimpleTable(AUTHORIZED_KEYS_FIELDS)
+ for row in AUTHORIZED_KEYS_ROWS:
+            table.add_row(row)
+        self.assertEqual(str(table), AUTHORIZED_KEYS_TABLE)
+
+ def test_get_string(self):
+ """get_string() method returns the same content as str()."""
+ table = SimpleTable(AUTHORIZED_KEYS_FIELDS)
+ for row in AUTHORIZED_KEYS_ROWS:
+ table.add_row(row)
+ self.assertEqual(table.get_string(), str(table))
diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py
index 88a111e3..d614350e 100644
--- a/tests/unittests/test_sshutil.py
+++ b/tests/unittests/test_sshutil.py
@@ -1,25 +1,65 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import os
from collections import namedtuple
+from functools import partial
from unittest.mock import patch
-from cloudinit import ssh_util
-from cloudinit.tests import helpers as test_helpers
-from cloudinit import util
+from cloudinit import ssh_util, util
+from tests.unittests import helpers as test_helpers
# https://stackoverflow.com/questions/11351032/
FakePwEnt = namedtuple(
- 'FakePwEnt',
- ['pw_dir', 'pw_gecos', 'pw_name', 'pw_passwd', 'pw_shell', 'pwd_uid'])
+ "FakePwEnt",
+ [
+ "pw_name",
+ "pw_passwd",
+ "pw_uid",
+ "pw_gid",
+ "pw_gecos",
+ "pw_dir",
+ "pw_shell",
+ ],
+)
FakePwEnt.__new__.__defaults__ = tuple(
- "UNSET_%s" % n for n in FakePwEnt._fields)
+ "UNSET_%s" % n for n in FakePwEnt._fields
+)
+
+
+def mock_get_owner(updated_permissions, value):
+ try:
+ return updated_permissions[value][0]
+    except KeyError:
+ return util.get_owner(value)
+
+
+def mock_get_group(updated_permissions, value):
+ try:
+ return updated_permissions[value][1]
+    except KeyError:
+ return util.get_group(value)
+
+
+def mock_get_user_groups(username):
+ return username
+
+
+def mock_get_permissions(updated_permissions, value):
+ try:
+ return updated_permissions[value][2]
+    except KeyError:
+ return util.get_permissions(value)
+
+
+def mock_getpwnam(users, username):
+ return users[username]
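+
+
+# Usage sketch: the mock_* helpers above are bound to a specific
+# permissions/users mapping with functools.partial before being attached to a
+# mock, e.g.
+#
+#     m_get_owner.side_effect = partial(mock_get_owner, mock_permissions)
+#
+# so each test can describe its fake filesystem layout as a plain dict.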
# Do not use these public keys, most of them are fetched from
# the testdata for OpenSSH, and their private keys are available
# https://github.com/openssh/openssh-portable/tree/master/regress/unittests/sshkey/testdata
VALID_CONTENT = {
- 'dsa': (
+ "dsa": (
"AAAAB3NzaC1kc3MAAACBAIrjOQSlSea19bExXBMBKBvcLhBoVvNBjCppNzllipF"
"W4jgIOMcNanULRrZGjkOKat6MWJNetSbV1E6IOFDQ16rQgsh/OvYU9XhzM8seLa"
"A21VszZuhIV7/2DE3vxu7B54zVzueG1O1Deq6goQCRGWBUnqO2yluJiG4HzrnDa"
@@ -31,12 +71,12 @@ VALID_CONTENT = {
"JNDnIqDHxTkc6LY2vu8Y2pQ3/bVnllZZOda2oD5HQ7ovygQa6CH+fbaZHbdDUX/"
"5z7u2rVAlDw=="
),
- 'ecdsa': (
+ "ecdsa": (
"AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBITrGBB3cgJ"
"J7fPxvtMW9H3oRisNpJ3OAslxZeyP7I0A9BPAW0RQIwHVtVnM7zrp4nI+JLZov/"
"Ql7lc2leWL7CY="
),
- 'rsa': (
+ "rsa": (
"AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZdQueUq5oz"
"emNSj8T7enqKHOEaFoU2VoPgGEWC9RyzSQVeyD6s7APMcE82EtmW4skVEgEGSbD"
"c1pvxzxtchBj78hJP6Cf5TCMFSXw+Fz5rF1dR23QDbN1mkHs7adr8GW4kSWqU7Q"
@@ -44,11 +84,10 @@ VALID_CONTENT = {
"YWpMfYdPUnE7u536WqzFmsaqJctz3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07"
"/+i1D+ey3ONkZLN+LQ714cgj8fRS4Hj29SCmXp5Kt5/82cD/VN3NtHw=="
),
- 'ed25519': (
- "AAAAC3NzaC1lZDI1NTE5AAAAIA1J77+CrJ8p6/vWCEzuylqJNMHUP/XmeYyGVWb"
- "8lnDd"
+ "ed25519": (
+ "AAAAC3NzaC1lZDI1NTE5AAAAIA1J77+CrJ8p6/vWCEzuylqJNMHUP/XmeYyGVWb8lnDd"
),
- 'ecdsa-sha2-nistp256-cert-v01@openssh.com': (
+ "ecdsa-sha2-nistp256-cert-v01@openssh.com": (
"AAAAKGVjZHNhLXNoYTItbmlzdHAyNTYtY2VydC12MDFAb3BlbnNzaC5jb20AAAA"
"gQIfwT/+UX68/hlKsdKuaOuAVB6ftTg03SlP/uH4OBEwAAAAIbmlzdHAyNTYAAA"
"BBBEjA0gjJmPM6La3sXyfNlnjilvvGY6I2M8SvJj4o3X/46wcUbPWTaj4RF3EXw"
@@ -63,12 +102,12 @@ VALID_CONTENT = {
"2tM3QXkDcwdP0SxSEW5yy4XV5oAAAAhANNMm1cdVlAt3hmycQgdD82zPlg5YvVO"
"iN0SQTbgVD8i"
),
- 'ecdsa-sha2-nistp256': (
+ "ecdsa-sha2-nistp256": (
"AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEjA0gjJmPM"
"6La3sXyfNlnjilvvGY6I2M8SvJj4o3X/46wcUbPWTaj4RF3EXwHvNxplYBwdPlk"
"2zEecvf9Cs2BM="
),
- 'ecdsa-sha2-nistp384-cert-v01@openssh.com': (
+ "ecdsa-sha2-nistp384-cert-v01@openssh.com": (
"AAAAKGVjZHNhLXNoYTItbmlzdHAzODQtY2VydC12MDFAb3BlbnNzaC5jb20AAAA"
"grnSvDsK1EnCZndO1IyGWcGkVgVSkPWi/XO2ybPFyLVUAAAAIbmlzdHAzODQAAA"
"BhBAaYSQs+8TT0Tzciy0dorwhur6yzOGUrYQ6ueUQYWbE7eNdHmhsVrlpGPgSaY"
@@ -85,12 +124,12 @@ VALID_CONTENT = {
"RVYqYQgAAADAiit0UCMDAUbjD+R2x4LvU3x/t8G3sdqDLRNfMRpjZpvcS8AwC+Y"
"VFVSQNn0AyzW0="
),
- 'ecdsa-sha2-nistp384': (
+ "ecdsa-sha2-nistp384": (
"AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBAaYSQs+8TT"
"0Tzciy0dorwhur6yzOGUrYQ6ueUQYWbE7eNdHmhsVrlpGPgSaYByhXtAJiPOMqL"
"U5h0eb3sCtM3ek4NvjXFTGTqPrrxJI6q0OsgrtkGE7UM9ZsfMm7q6BOA=="
),
- 'ecdsa-sha2-nistp521-cert-v01@openssh.com': (
+ "ecdsa-sha2-nistp521-cert-v01@openssh.com": (
"AAAAKGVjZHNhLXNoYTItbmlzdHA1MjEtY2VydC12MDFAb3BlbnNzaC5jb20AAAA"
"gGmRzkkMvRFk1V5U3m3mQ2nfW20SJVXk1NKnT5iZGDcEAAAAIbmlzdHA1MjEAAA"
"CFBAHosAOHAI1ZkerbKYQ72S6uit1u77PCj/OalZtXgsxv0TTAZB273puG2X94C"
@@ -109,13 +148,13 @@ VALID_CONTENT = {
"AAAQgEzkIpX3yKXPaPcK17mNx40ujEDitm4ARmbhAge0sFhZtf7YIgI55b6vkI8"
"JvMJkzQCBF1cpNOaIpVh1nFZNBphMQ=="
),
- 'ecdsa-sha2-nistp521': (
+ "ecdsa-sha2-nistp521": (
"AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBAHosAOHAI1"
"ZkerbKYQ72S6uit1u77PCj/OalZtXgsxv0TTAZB273puG2X94CQ8yyNHcby87zF"
"ZHdv5BSKyZ/cyREAAeiAcSakop9VS3+bUfZpEIqwBZXarwUjnRnxprkcQ0rfCCd"
"agkGZr/OA7DemK2D8tKLTHsKoEEWNImo6/pXDkFxA=="
),
- 'sk-ecdsa-sha2-nistp256-cert-v01@openssh.com': (
+ "sk-ecdsa-sha2-nistp256-cert-v01@openssh.com": (
"AAAAIHNzaC1lZDI1NTE5LWNlcnQtdjAxQG9wZW5zc2guY29tAAAAIIxzuxl4z3u"
"wAIslne8Huft+1n1IhHAlNbWZkQyyECCGAAAAIFOG6kY7Rf4UtCFvPwKgo/BztX"
"ck2xC4a2WyA34XtIwZAAAAAAAAAAgAAAACAAAABmp1bGl1cwAAABIAAAAFaG9zd"
@@ -124,12 +163,12 @@ VALID_CONTENT = {
"AAFMAAAALc3NoLWVkMjU1MTkAAABABGTn+Bmz86Ajk+iqKCSdP5NClsYzn4alJd"
"0V5bizhP0Kumc/HbqQfSt684J1WdSzih+EjvnTgBhK9jTBKb90AQ=="
),
- 'sk-ecdsa-sha2-nistp256@openssh.com': (
+ "sk-ecdsa-sha2-nistp256@openssh.com": (
"AAAAInNrLWVjZHNhLXNoYTItbmlzdHAyNTZAb3BlbnNzaC5jb20AAAAIbmlzdHA"
"yNTYAAABBBIELQJ2DgvaX1yQlKFokfWM2suuaCFI2qp0eJodHyg6O4ifxc3XpRK"
"d1OS8dNYQtE/YjdXSrA+AOnMF5ns2Nkx4AAAAEc3NoOg=="
),
- 'sk-ssh-ed25519-cert-v01@openssh.com': (
+ "sk-ssh-ed25519-cert-v01@openssh.com": (
"AAAAIHNzaC1lZDI1NTE5LWNlcnQtdjAxQG9wZW5zc2guY29tAAAAIIxzuxl4z3u"
"wAIslne8Huft+1n1IhHAlNbWZkQyyECCGAAAAIFOG6kY7Rf4UtCFvPwKgo/BztX"
"ck2xC4a2WyA34XtIwZAAAAAAAAAAgAAAACAAAABmp1bGl1cwAAABIAAAAFaG9zd"
@@ -138,11 +177,11 @@ VALID_CONTENT = {
"AAFMAAAALc3NoLWVkMjU1MTkAAABABGTn+Bmz86Ajk+iqKCSdP5NClsYzn4alJd"
"0V5bizhP0Kumc/HbqQfSt684J1WdSzih+EjvnTgBhK9jTBKb90AQ=="
),
- 'sk-ssh-ed25519@openssh.com': (
+ "sk-ssh-ed25519@openssh.com": (
"AAAAGnNrLXNzaC1lZDI1NTE5QG9wZW5zc2guY29tAAAAICFo/k5LU8863u66YC9"
"eUO2170QduohPURkQnbLa/dczAAAABHNzaDo="
),
- 'ssh-dss-cert-v01@openssh.com': (
+ "ssh-dss-cert-v01@openssh.com": (
"AAAAHHNzaC1kc3MtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgdTlbNU9Hn9Qng3F"
"HxwH971bxCIoq1ern/QWFFDWXgmYAAACBAPqS600VGwdPAQC/p3f0uGyrLVql0c"
"Fn1zYd/JGvtabKnIYjLaYprje/NcjwI3CZFJiz4Dp3S8kLs+X5/1DMn/Tg1Y4D4"
@@ -159,7 +198,7 @@ VALID_CONTENT = {
"+F7SMGQAAAFMAAAALc3NoLWVkMjU1MTkAAABAh/z1LIdNL1b66tQ8t9DY9BTB3B"
"QKpTKmc7ezyFKLwl96yaIniZwD9Ticdbe/8i/Li3uCFE3EAt8NAIv9zff8Bg=="
),
- 'ssh-dss': (
+ "ssh-dss": (
"AAAAB3NzaC1kc3MAAACBAPqS600VGwdPAQC/p3f0uGyrLVql0cFn1zYd/JGvtab"
"KnIYjLaYprje/NcjwI3CZFJiz4Dp3S8kLs+X5/1DMn/Tg1Y4D4yLB+6vCtHcJF7"
"rVBFhvw/KZwc7G54ez3khyOtsg82fzpyOc8/mq+/+C5TMKO7DDjMF0k5emWKCsa"
@@ -171,7 +210,7 @@ VALID_CONTENT = {
"GIf95LiLSgaXMjko7joot+LK84ltLymwZ4QMnYjnZSSclf1UuyQMcUtb34+I0u9"
"Ycnyhp2mSFsQt"
),
- 'ssh-ed25519-cert-v01@openssh.com': (
+ "ssh-ed25519-cert-v01@openssh.com": (
"AAAAIHNzaC1lZDI1NTE5LWNlcnQtdjAxQG9wZW5zc2guY29tAAAAIIxzuxl4z3u"
"wAIslne8Huft+1n1IhHAlNbWZkQyyECCGAAAAIFOG6kY7Rf4UtCFvPwKgo/BztX"
"ck2xC4a2WyA34XtIwZAAAAAAAAAAgAAAACAAAABmp1bGl1cwAAABIAAAAFaG9zd"
@@ -180,11 +219,10 @@ VALID_CONTENT = {
"AAFMAAAALc3NoLWVkMjU1MTkAAABABGTn+Bmz86Ajk+iqKCSdP5NClsYzn4alJd"
"0V5bizhP0Kumc/HbqQfSt684J1WdSzih+EjvnTgBhK9jTBKb90AQ=="
),
- 'ssh-ed25519': (
- "AAAAC3NzaC1lZDI1NTE5AAAAIFOG6kY7Rf4UtCFvPwKgo/BztXck2xC4a2WyA34"
- "XtIwZ"
+ "ssh-ed25519": (
+ "AAAAC3NzaC1lZDI1NTE5AAAAIFOG6kY7Rf4UtCFvPwKgo/BztXck2xC4a2WyA34XtIwZ"
),
- 'ssh-rsa-cert-v01@openssh.com': (
+ "ssh-rsa-cert-v01@openssh.com": (
"AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAg98LhS2EHxLOWCLo"
"pZPwHdg/RJXusnkOqQXSc9R7aITkAAAADAQABAAAAgQDLV5lUTt7FrADseB/CGh"
"EZzpoojjEW5y8+ePvLppmK3MmMI18ud6vxzpK3bwZLYkVSyfJYI0HmIuGhdu7yM"
@@ -195,13 +233,13 @@ VALID_CONTENT = {
"he0jBkAAABTAAAAC3NzaC1lZDI1NTE5AAAAQI3QGlUCzC07KorupxpDkkGy6tni"
"aZ8EvBflzvv+itXWNchGvfUeHmVT6aX0sRqehdz/lR+GmXRoZBhofwh0qAM="
),
- 'ssh-rsa': (
+ "ssh-rsa": (
"AAAAB3NzaC1yc2EAAAADAQABAAAAgQDLV5lUTt7FrADseB/CGhEZzpoojjEW5y8"
"+ePvLppmK3MmMI18ud6vxzpK3bwZLYkVSyfJYI0HmIuGhdu7yMrW6wb84gbq8C3"
"1Xoe9EORcIUuGSvDKdNSM1SjlhDquRblDFB8kToqXyx1lqrXecXylxIUOL0jE+u"
"0rU1967pDJx+w=="
),
- 'ssh-xmss-cert-v01@openssh.com': (
+ "ssh-xmss-cert-v01@openssh.com": (
"AAAAHXNzaC14bXNzLWNlcnQtdjAxQG9wZW5zc2guY29tAAAAIM2UD0IH+Igsekq"
"xjTO5f36exX4WGRMCtDGPjwfbXblxAAAAFVhNU1NfU0hBMi0yNTZfVzE2X0gxMA"
"AAAEDI83/K5JMOy0BMJgQypRdz35ApAnoQinMJ8ZMoZPaEJF8Z4rANQlfzaAXum"
@@ -267,7 +305,7 @@ VALID_CONTENT = {
"rNYClh8fQEQ8XuOCDpomMWu58YOTfbZNMDWs/Ou7RfCjX+VNwjPShDK9joMwWKc"
"Jy3QalZbaoWtcyyvXxR2sqhVR9F7Cmasq4="
),
- 'ssh-xmss@openssh.com': (
+ "ssh-xmss@openssh.com": (
"AAAAFHNzaC14bXNzQG9wZW5zc2guY29tAAAAFVhNU1NfU0hBMi0yNTZfVzE2X0g"
"xMAAAAECqptWnK94d+Sj2xcdTu8gz+75lawZoLSZFqC5IhbYuT/Z3oBZCim6yt+"
"HAmk6MKldl3Fg+74v4sR/SII0I0Jv/"
@@ -278,19 +316,25 @@ KEY_TYPES = list(VALID_CONTENT.keys())
TEST_OPTIONS = (
"no-port-forwarding,no-agent-forwarding,no-X11-forwarding,"
- 'command="echo \'Please login as the user \"ubuntu\" rather than the'
- 'user \"root\".\';echo;sleep 10"')
+ 'command="echo \'Please login as the user "ubuntu" rather than the'
+ 'user "root".\';echo;sleep 10"'
+)
class TestAuthKeyLineParser(test_helpers.CiTestCase):
-
def test_simple_parse(self):
# test key line with common 3 fields (keytype, base64, comment)
parser = ssh_util.AuthKeyLineParser()
for ktype in KEY_TYPES:
content = VALID_CONTENT[ktype]
- comment = 'user-%s@host' % ktype
- line = ' '.join((ktype, content, comment,))
+ comment = "user-%s@host" % ktype
+ line = " ".join(
+ (
+ ktype,
+ content,
+ comment,
+ )
+ )
key = parser.parse(line)
self.assertEqual(key.base64, content)
@@ -303,7 +347,12 @@ class TestAuthKeyLineParser(test_helpers.CiTestCase):
parser = ssh_util.AuthKeyLineParser()
for ktype in KEY_TYPES:
content = VALID_CONTENT[ktype]
- line = ' '.join((ktype, content,))
+ line = " ".join(
+ (
+ ktype,
+ content,
+ )
+ )
key = parser.parse(line)
self.assertEqual(key.base64, content)
@@ -317,8 +366,15 @@ class TestAuthKeyLineParser(test_helpers.CiTestCase):
options = TEST_OPTIONS
for ktype in KEY_TYPES:
content = VALID_CONTENT[ktype]
- comment = 'user-%s@host' % ktype
- line = ' '.join((options, ktype, content, comment,))
+ comment = "user-%s@host" % ktype
+ line = " ".join(
+ (
+ options,
+ ktype,
+ content,
+ comment,
+ )
+ )
key = parser.parse(line)
self.assertEqual(key.base64, content)
@@ -330,7 +386,7 @@ class TestAuthKeyLineParser(test_helpers.CiTestCase):
# test key line with key type and base64 only
parser = ssh_util.AuthKeyLineParser()
- baseline = ' '.join(("rsa", VALID_CONTENT['rsa'], "user@host"))
+ baseline = " ".join(("rsa", VALID_CONTENT["rsa"], "user@host"))
myopts = "no-port-forwarding,no-agent-forwarding"
key = parser.parse("allowedopt" + " " + baseline)
@@ -341,59 +397,62 @@ class TestAuthKeyLineParser(test_helpers.CiTestCase):
def test_parse_invalid_keytype(self):
parser = ssh_util.AuthKeyLineParser()
- key = parser.parse(' '.join(["badkeytype", VALID_CONTENT['rsa']]))
+ key = parser.parse(" ".join(["badkeytype", VALID_CONTENT["rsa"]]))
self.assertFalse(key.valid())
class TestUpdateAuthorizedKeys(test_helpers.CiTestCase):
-
def test_new_keys_replace(self):
"""new entries with the same base64 should replace old."""
orig_entries = [
- ' '.join(('rsa', VALID_CONTENT['rsa'], 'orig_comment1')),
- ' '.join(('dsa', VALID_CONTENT['dsa'], 'orig_comment2'))]
+ " ".join(("rsa", VALID_CONTENT["rsa"], "orig_comment1")),
+ " ".join(("dsa", VALID_CONTENT["dsa"], "orig_comment2")),
+ ]
new_entries = [
- ' '.join(('rsa', VALID_CONTENT['rsa'], 'new_comment1')), ]
+ " ".join(("rsa", VALID_CONTENT["rsa"], "new_comment1")),
+ ]
- expected = '\n'.join([new_entries[0], orig_entries[1]]) + '\n'
+ expected = "\n".join([new_entries[0], orig_entries[1]]) + "\n"
parser = ssh_util.AuthKeyLineParser()
found = ssh_util.update_authorized_keys(
[parser.parse(p) for p in orig_entries],
- [parser.parse(p) for p in new_entries])
+ [parser.parse(p) for p in new_entries],
+ )
self.assertEqual(expected, found)
def test_new_invalid_keys_are_ignored(self):
"""new entries that are invalid should be skipped."""
orig_entries = [
- ' '.join(('rsa', VALID_CONTENT['rsa'], 'orig_comment1')),
- ' '.join(('dsa', VALID_CONTENT['dsa'], 'orig_comment2'))]
+ " ".join(("rsa", VALID_CONTENT["rsa"], "orig_comment1")),
+ " ".join(("dsa", VALID_CONTENT["dsa"], "orig_comment2")),
+ ]
new_entries = [
- ' '.join(('rsa', VALID_CONTENT['rsa'], 'new_comment1')),
- 'xxx-invalid-thing1',
- 'xxx-invalid-blob2'
+ " ".join(("rsa", VALID_CONTENT["rsa"], "new_comment1")),
+ "xxx-invalid-thing1",
+ "xxx-invalid-blob2",
]
- expected = '\n'.join([new_entries[0], orig_entries[1]]) + '\n'
+ expected = "\n".join([new_entries[0], orig_entries[1]]) + "\n"
parser = ssh_util.AuthKeyLineParser()
found = ssh_util.update_authorized_keys(
[parser.parse(p) for p in orig_entries],
- [parser.parse(p) for p in new_entries])
+ [parser.parse(p) for p in new_entries],
+ )
self.assertEqual(expected, found)
class TestParseSSHConfig(test_helpers.CiTestCase):
-
def setUp(self):
- self.load_file_patch = patch('cloudinit.ssh_util.util.load_file')
+ self.load_file_patch = patch("cloudinit.ssh_util.util.load_file")
self.load_file = self.load_file_patch.start()
- self.isfile_patch = patch('cloudinit.ssh_util.os.path.isfile')
+ self.isfile_patch = patch("cloudinit.ssh_util.os.path.isfile")
self.isfile = self.isfile_patch.start()
self.isfile.return_value = True
@@ -404,60 +463,61 @@ class TestParseSSHConfig(test_helpers.CiTestCase):
def test_not_a_file(self):
self.isfile.return_value = False
self.load_file.side_effect = IOError
- ret = ssh_util.parse_ssh_config('not a real file')
+ ret = ssh_util.parse_ssh_config("not a real file")
self.assertEqual([], ret)
def test_empty_file(self):
- self.load_file.return_value = ''
- ret = ssh_util.parse_ssh_config('some real file')
+ self.load_file.return_value = ""
+ ret = ssh_util.parse_ssh_config("some real file")
self.assertEqual([], ret)
def test_comment_line(self):
- comment_line = '# This is a comment'
+ comment_line = "# This is a comment"
self.load_file.return_value = comment_line
- ret = ssh_util.parse_ssh_config('some real file')
+ ret = ssh_util.parse_ssh_config("some real file")
self.assertEqual(1, len(ret))
self.assertEqual(comment_line, ret[0].line)
def test_blank_lines(self):
- lines = ['', '\t', ' ']
- self.load_file.return_value = '\n'.join(lines)
- ret = ssh_util.parse_ssh_config('some real file')
+ lines = ["", "\t", " "]
+ self.load_file.return_value = "\n".join(lines)
+ ret = ssh_util.parse_ssh_config("some real file")
self.assertEqual(len(lines), len(ret))
for line in ret:
- self.assertEqual('', line.line)
+ self.assertEqual("", line.line)
def test_lower_case_config(self):
- self.load_file.return_value = 'foo bar'
- ret = ssh_util.parse_ssh_config('some real file')
+ self.load_file.return_value = "foo bar"
+ ret = ssh_util.parse_ssh_config("some real file")
self.assertEqual(1, len(ret))
- self.assertEqual('foo', ret[0].key)
- self.assertEqual('bar', ret[0].value)
+ self.assertEqual("foo", ret[0].key)
+ self.assertEqual("bar", ret[0].value)
def test_upper_case_config(self):
- self.load_file.return_value = 'Foo Bar'
- ret = ssh_util.parse_ssh_config('some real file')
+ self.load_file.return_value = "Foo Bar"
+ ret = ssh_util.parse_ssh_config("some real file")
self.assertEqual(1, len(ret))
- self.assertEqual('foo', ret[0].key)
- self.assertEqual('Bar', ret[0].value)
+ self.assertEqual("foo", ret[0].key)
+ self.assertEqual("Bar", ret[0].value)
def test_lower_case_with_equals(self):
- self.load_file.return_value = 'foo=bar'
- ret = ssh_util.parse_ssh_config('some real file')
+ self.load_file.return_value = "foo=bar"
+ ret = ssh_util.parse_ssh_config("some real file")
self.assertEqual(1, len(ret))
- self.assertEqual('foo', ret[0].key)
- self.assertEqual('bar', ret[0].value)
+ self.assertEqual("foo", ret[0].key)
+ self.assertEqual("bar", ret[0].value)
def test_upper_case_with_equals(self):
- self.load_file.return_value = 'Foo=bar'
- ret = ssh_util.parse_ssh_config('some real file')
+ self.load_file.return_value = "Foo=bar"
+ ret = ssh_util.parse_ssh_config("some real file")
self.assertEqual(1, len(ret))
- self.assertEqual('foo', ret[0].key)
- self.assertEqual('bar', ret[0].value)
+ self.assertEqual("foo", ret[0].key)
+ self.assertEqual("bar", ret[0].value)
class TestUpdateSshConfigLines(test_helpers.CiTestCase):
"""Test the update_ssh_config_lines method."""
+
exlines = [
"#PasswordAuthentication yes",
"UsePAM yes",
@@ -476,8 +536,8 @@ class TestUpdateSshConfigLines(test_helpers.CiTestCase):
def test_new_option_added(self):
"""A single update of non-existing option."""
lines = ssh_util.parse_ssh_config_lines(list(self.exlines))
- result = ssh_util.update_ssh_config_lines(lines, {'MyKey': 'MyVal'})
- self.assertEqual(['MyKey'], result)
+ result = ssh_util.update_ssh_config_lines(lines, {"MyKey": "MyVal"})
+ self.assertEqual(["MyKey"], result)
self.check_line(lines[-1], "MyKey", "MyVal")
def test_commented_out_not_updated_but_appended(self):
@@ -487,6 +547,14 @@ class TestUpdateSshConfigLines(test_helpers.CiTestCase):
self.assertEqual([self.pwauth], result)
self.check_line(lines[-1], self.pwauth, "no")
+ def test_option_without_value(self):
+ """Implementation only accepts key-value pairs."""
+ extended_exlines = self.exlines.copy()
+ denyusers_opt = "DenyUsers"
+ extended_exlines.append(denyusers_opt)
+ lines = ssh_util.parse_ssh_config_lines(list(extended_exlines))
+ self.assertNotIn(denyusers_opt, str(lines))
+
def test_single_option_updated(self):
"""A single update should have change made and line updated."""
opt, val = ("UsePAM", "no")
@@ -497,8 +565,12 @@ class TestUpdateSshConfigLines(test_helpers.CiTestCase):
def test_multiple_updates_with_add(self):
"""Verify multiple updates some added some changed, some not."""
- updates = {"UsePAM": "no", "X11Forwarding": "no", "NewOpt": "newval",
- "AcceptEnv": "LANG ADD LC_*"}
+ updates = {
+ "UsePAM": "no",
+ "X11Forwarding": "no",
+ "NewOpt": "newval",
+ "AcceptEnv": "LANG ADD LC_*",
+ }
lines = ssh_util.parse_ssh_config_lines(list(self.exlines))
result = ssh_util.update_ssh_config_lines(lines, updates)
self.assertEqual(set(["UsePAM", "NewOpt", "AcceptEnv"]), set(result))
@@ -523,7 +595,7 @@ class TestUpdateSshConfigLines(test_helpers.CiTestCase):
class TestUpdateSshConfig(test_helpers.CiTestCase):
- cfgdata = '\n'.join(["#Option val", "MyKey ORIG_VAL", ""])
+ cfgdata = "\n".join(["#Option val", "MyKey ORIG_VAL", ""])
def test_modified(self):
mycfg = self.tmp_path("ssh_config_1")
@@ -533,7 +605,7 @@ class TestUpdateSshConfig(test_helpers.CiTestCase):
found = util.load_file(mycfg)
self.assertEqual(self.cfgdata.replace("ORIG_VAL", "NEW_VAL"), found)
# assert there is a newline at end of file (LP: #1677205)
- self.assertEqual('\n', found[-1])
+ self.assertEqual("\n", found[-1])
def test_not_modified(self):
mycfg = self.tmp_path("ssh_config_2")
@@ -550,76 +622,949 @@ class TestBasicAuthorizedKeyParse(test_helpers.CiTestCase):
self.assertEqual(
["/opt/bobby/keys"],
ssh_util.render_authorizedkeysfile_paths(
- "/opt/%u/keys", "/home/bobby", "bobby"))
+ "/opt/%u/keys", "/home/bobby", "bobby"
+ ),
+ )
+
+ def test_user_file(self):
+ self.assertEqual(
+ ["/opt/bobby"],
+ ssh_util.render_authorizedkeysfile_paths(
+ "/opt/%u", "/home/bobby", "bobby"
+ ),
+ )
+
+ def test_user_file2(self):
+ self.assertEqual(
+ ["/opt/bobby/bobby"],
+ ssh_util.render_authorizedkeysfile_paths(
+ "/opt/%u/%u", "/home/bobby", "bobby"
+ ),
+ )
def test_multiple(self):
self.assertEqual(
["/keys/path1", "/keys/path2"],
ssh_util.render_authorizedkeysfile_paths(
- "/keys/path1 /keys/path2", "/home/bobby", "bobby"))
+ "/keys/path1 /keys/path2", "/home/bobby", "bobby"
+ ),
+ )
+
+ def test_multiple2(self):
+ self.assertEqual(
+ ["/keys/path1", "/keys/bobby"],
+ ssh_util.render_authorizedkeysfile_paths(
+ "/keys/path1 /keys/%u", "/home/bobby", "bobby"
+ ),
+ )
def test_relative(self):
self.assertEqual(
["/home/bobby/.secret/keys"],
ssh_util.render_authorizedkeysfile_paths(
- ".secret/keys", "/home/bobby", "bobby"))
+ ".secret/keys", "/home/bobby", "bobby"
+ ),
+ )
def test_home(self):
self.assertEqual(
["/homedirs/bobby/.keys"],
ssh_util.render_authorizedkeysfile_paths(
- "%h/.keys", "/homedirs/bobby", "bobby"))
+ "%h/.keys", "/homedirs/bobby", "bobby"
+ ),
+ )
+
+ def test_all(self):
+ self.assertEqual(
+ [
+ "/homedirs/bobby/.keys",
+ "/homedirs/bobby/.secret/keys",
+ "/keys/path1",
+ "/opt/bobby/keys",
+ ],
+ ssh_util.render_authorizedkeysfile_paths(
+ "%h/.keys .secret/keys /keys/path1 /opt/%u/keys",
+ "/homedirs/bobby",
+ "bobby",
+ ),
+ )
class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
+ def create_fake_users(
+ self,
+ names,
+ mock_permissions,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ users,
+ ):
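+        """Register a FakePwEnt for each name in ``names`` (plus root), wire
+        the permission/owner/group/getpwnam mocks to the ``mock_permissions``
+        and ``users`` mappings, and return the fake home directories."""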
+ homes = []
+
+ root = "/tmp/root"
+ fpw = FakePwEnt(pw_name="root", pw_dir=root)
+ users["root"] = fpw
+
+ for name in names:
+ home = "/tmp/home/" + name
+ fpw = FakePwEnt(pw_name=name, pw_dir=home)
+ users[name] = fpw
+ homes.append(home)
+
+ m_get_permissions.side_effect = partial(
+ mock_get_permissions, mock_permissions
+ )
+ m_get_owner.side_effect = partial(mock_get_owner, mock_permissions)
+ m_get_group.side_effect = partial(mock_get_group, mock_permissions)
+ m_getpwnam.side_effect = partial(mock_getpwnam, users)
+ return homes
+
+ def create_user_authorized_file(self, home, filename, content_key, keys):
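+        """Write VALID_CONTENT[content_key] to <home>/.ssh/<filename>, record
+        the path in ``keys`` and return it."""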
+ user_ssh_folder = "%s/.ssh" % home
+ # /tmp/home/<user>/.ssh/authorized_keys = content_key
+ authorized_keys = self.tmp_path(filename, dir=user_ssh_folder)
+ util.write_file(authorized_keys, VALID_CONTENT[content_key])
+ keys[authorized_keys] = content_key
+ return authorized_keys
+
+ def create_global_authorized_file(self, filename, content_key, keys):
+ authorized_keys = self.tmp_path(filename, dir="/tmp")
+ util.write_file(authorized_keys, VALID_CONTENT[content_key])
+ keys[authorized_keys] = content_key
+ return authorized_keys
+
+ def create_sshd_config(self, authorized_keys_files):
+ sshd_config = self.tmp_path("sshd_config", dir="/tmp")
+ util.write_file(
+ sshd_config, "AuthorizedKeysFile " + authorized_keys_files
+ )
+ return sshd_config
+
+ def execute_and_check(
+ self, user, sshd_config, solution, keys, delete_keys=True
+ ):
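+        """Extract the authorized keys resolved for ``user`` from
+        ``sshd_config`` and assert that ``solution`` is the selected file and
+        the only one whose key ends up in the rendered content."""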
+ (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(
+ user, sshd_config
+ )
+ content = ssh_util.update_authorized_keys(auth_key_entries, [])
+
+ self.assertEqual(auth_key_fn, solution)
+ for path, key in keys.items():
+ if path == solution:
+ self.assertTrue(VALID_CONTENT[key] in content)
+ else:
+ self.assertFalse(VALID_CONTENT[key] in content)
+
+ if delete_keys and os.path.isdir("/tmp/home/"):
+ util.delete_dir_contents("/tmp/home/")
+
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_single_user_two_local_files(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam
+ ):
+ user_bobby = "bobby"
+ keys = {}
+ users = {}
+ mock_permissions = {
+ "/tmp/home/bobby": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh/user_keys": ("bobby", "bobby", 0o600),
+ "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
+ }
+
+ homes = self.create_fake_users(
+ [user_bobby],
+ mock_permissions,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ users,
+ )
+ home = homes[0]
+
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home, "authorized_keys", "rsa", keys
+ )
+
+ # /tmp/home/bobby/.ssh/user_keys = dsa
+ user_keys = self.create_user_authorized_file(
+ home, "user_keys", "dsa", keys
+ )
+
+ # /tmp/sshd_config
+ options = "%s %s" % (authorized_keys, user_keys)
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(user_bobby, sshd_config, authorized_keys, keys)
@patch("cloudinit.ssh_util.pwd.getpwnam")
- def test_multiple_authorizedkeys_file_order1(self, m_getpwnam):
- fpw = FakePwEnt(pw_name='bobby', pw_dir='/home2/bobby')
- m_getpwnam.return_value = fpw
- authorized_keys = self.tmp_path('authorized_keys')
- util.write_file(authorized_keys, VALID_CONTENT['rsa'])
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_single_user_two_local_files_inverted(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam
+ ):
+ user_bobby = "bobby"
+ keys = {}
+ users = {}
+ mock_permissions = {
+ "/tmp/home/bobby": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh/user_keys": ("bobby", "bobby", 0o600),
+ "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
+ }
+
+ homes = self.create_fake_users(
+ [user_bobby],
+ mock_permissions,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ users,
+ )
+ home = homes[0]
- user_keys = self.tmp_path('user_keys')
- util.write_file(user_keys, VALID_CONTENT['dsa'])
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home, "authorized_keys", "rsa", keys
+ )
- sshd_config = self.tmp_path('sshd_config')
- util.write_file(
- sshd_config,
- "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys)
+ # /tmp/home/bobby/.ssh/user_keys = dsa
+ user_keys = self.create_user_authorized_file(
+ home, "user_keys", "dsa", keys
)
- (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(
- fpw.pw_name, sshd_config)
- content = ssh_util.update_authorized_keys(auth_key_entries, [])
+ # /tmp/sshd_config
+ options = "%s %s" % (user_keys, authorized_keys)
+ sshd_config = self.create_sshd_config(options)
- self.assertEqual(authorized_keys, auth_key_fn)
- self.assertTrue(VALID_CONTENT['rsa'] in content)
- self.assertTrue(VALID_CONTENT['dsa'] in content)
+ self.execute_and_check(user_bobby, sshd_config, user_keys, keys)
@patch("cloudinit.ssh_util.pwd.getpwnam")
- def test_multiple_authorizedkeys_file_order2(self, m_getpwnam):
- fpw = FakePwEnt(pw_name='suzie', pw_dir='/home/suzie')
- m_getpwnam.return_value = fpw
- authorized_keys = self.tmp_path('authorized_keys')
- util.write_file(authorized_keys, VALID_CONTENT['rsa'])
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_single_user_local_global_files(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam
+ ):
+ user_bobby = "bobby"
+ keys = {}
+ users = {}
+ mock_permissions = {
+ "/tmp/home/bobby": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh/user_keys": ("bobby", "bobby", 0o600),
+ "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
+ }
+
+ homes = self.create_fake_users(
+ [user_bobby],
+ mock_permissions,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ users,
+ )
+ home = homes[0]
- user_keys = self.tmp_path('user_keys')
- util.write_file(user_keys, VALID_CONTENT['dsa'])
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home, "authorized_keys", "rsa", keys
+ )
- sshd_config = self.tmp_path('sshd_config')
- util.write_file(
- sshd_config,
- "AuthorizedKeysFile %s %s" % (user_keys, authorized_keys)
+ # /tmp/home/bobby/.ssh/user_keys = dsa
+ user_keys = self.create_user_authorized_file(
+ home, "user_keys", "dsa", keys
)
- (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(
- fpw.pw_name, sshd_config
+ authorized_keys_global = self.create_global_authorized_file(
+ "etc/ssh/authorized_keys", "ecdsa", keys
)
- content = ssh_util.update_authorized_keys(auth_key_entries, [])
- self.assertEqual(user_keys, auth_key_fn)
- self.assertTrue(VALID_CONTENT['rsa'] in content)
- self.assertTrue(VALID_CONTENT['dsa'] in content)
+ options = "%s %s %s" % (
+ authorized_keys_global,
+ user_keys,
+ authorized_keys,
+ )
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(user_bobby, sshd_config, user_keys, keys)
+
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_single_user_local_global_files_inverted(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam
+ ):
+ user_bobby = "bobby"
+ keys = {}
+ users = {}
+ mock_permissions = {
+ "/tmp/home/bobby": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh/user_keys3": ("bobby", "bobby", 0o600),
+ "/tmp/home/bobby/.ssh/authorized_keys2": ("bobby", "bobby", 0o600),
+ }
+
+ homes = self.create_fake_users(
+ [user_bobby],
+ mock_permissions,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ users,
+ )
+ home = homes[0]
+
+        # /tmp/home/bobby/.ssh/authorized_keys2 = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home, "authorized_keys2", "rsa", keys
+ )
+
+        # /tmp/home/bobby/.ssh/user_keys3 = dsa
+ user_keys = self.create_user_authorized_file(
+ home, "user_keys3", "dsa", keys
+ )
+
+ authorized_keys_global = self.create_global_authorized_file(
+ "etc/ssh/authorized_keys", "ecdsa", keys
+ )
+
+ options = "%s %s %s" % (
+ authorized_keys_global,
+ authorized_keys,
+ user_keys,
+ )
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(user_bobby, sshd_config, authorized_keys, keys)
+
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_single_user_global_file(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam
+ ):
+ user_bobby = "bobby"
+ keys = {}
+ users = {}
+ mock_permissions = {
+ "/tmp/home/bobby": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
+ }
+
+ homes = self.create_fake_users(
+ [user_bobby],
+ mock_permissions,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ users,
+ )
+ home = homes[0]
+
+ # /tmp/etc/ssh/authorized_keys = rsa
+ authorized_keys_global = self.create_global_authorized_file(
+ "etc/ssh/authorized_keys", "rsa", keys
+ )
+
+ options = "%s" % authorized_keys_global
+ sshd_config = self.create_sshd_config(options)
+
+ default = "%s/.ssh/authorized_keys" % home
+ self.execute_and_check(user_bobby, sshd_config, default, keys)
+
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_two_users_local_file_standard(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam
+ ):
+ keys = {}
+ users = {}
+ mock_permissions = {
+ "/tmp/home/bobby": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
+ "/tmp/home/suzie": ("suzie", "suzie", 0o700),
+ "/tmp/home/suzie/.ssh": ("suzie", "suzie", 0o700),
+ "/tmp/home/suzie/.ssh/authorized_keys": ("suzie", "suzie", 0o600),
+ }
+
+ user_bobby = "bobby"
+ user_suzie = "suzie"
+ homes = self.create_fake_users(
+ [user_bobby, user_suzie],
+ mock_permissions,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ users,
+ )
+ home_bobby = homes[0]
+ home_suzie = homes[1]
+
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home_bobby, "authorized_keys", "rsa", keys
+ )
+
+        # /tmp/home/suzie/.ssh/authorized_keys = ssh-xmss@openssh.com
+ authorized_keys2 = self.create_user_authorized_file(
+ home_suzie, "authorized_keys", "ssh-xmss@openssh.com", keys
+ )
+
+ options = ".ssh/authorized_keys"
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(
+ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False
+ )
+ self.execute_and_check(user_suzie, sshd_config, authorized_keys2, keys)
+
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_two_users_local_file_custom(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam
+ ):
+ keys = {}
+ users = {}
+ mock_permissions = {
+ "/tmp/home/bobby": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh/authorized_keys2": ("bobby", "bobby", 0o600),
+ "/tmp/home/suzie": ("suzie", "suzie", 0o700),
+ "/tmp/home/suzie/.ssh": ("suzie", "suzie", 0o700),
+ "/tmp/home/suzie/.ssh/authorized_keys2": ("suzie", "suzie", 0o600),
+ }
+
+ user_bobby = "bobby"
+ user_suzie = "suzie"
+ homes = self.create_fake_users(
+ [user_bobby, user_suzie],
+ mock_permissions,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ users,
+ )
+ home_bobby = homes[0]
+ home_suzie = homes[1]
+
+ # /tmp/home/bobby/.ssh/authorized_keys2 = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home_bobby, "authorized_keys2", "rsa", keys
+ )
+
+        # /tmp/home/suzie/.ssh/authorized_keys2 = ssh-xmss@openssh.com
+ authorized_keys2 = self.create_user_authorized_file(
+ home_suzie, "authorized_keys2", "ssh-xmss@openssh.com", keys
+ )
+
+ options = ".ssh/authorized_keys2"
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(
+ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False
+ )
+ self.execute_and_check(user_suzie, sshd_config, authorized_keys2, keys)
+
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_two_users_local_global_files(
+ self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam
+ ):
+ keys = {}
+ users = {}
+ mock_permissions = {
+ "/tmp/home/bobby": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh/authorized_keys2": ("bobby", "bobby", 0o600),
+ "/tmp/home/bobby/.ssh/user_keys3": ("bobby", "bobby", 0o600),
+ "/tmp/home/suzie": ("suzie", "suzie", 0o700),
+ "/tmp/home/suzie/.ssh": ("suzie", "suzie", 0o700),
+ "/tmp/home/suzie/.ssh/authorized_keys2": ("suzie", "suzie", 0o600),
+ "/tmp/home/suzie/.ssh/user_keys3": ("suzie", "suzie", 0o600),
+ }
+
+ user_bobby = "bobby"
+ user_suzie = "suzie"
+ homes = self.create_fake_users(
+ [user_bobby, user_suzie],
+ mock_permissions,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ users,
+ )
+ home_bobby = homes[0]
+ home_suzie = homes[1]
+
+ # /tmp/home/bobby/.ssh/authorized_keys2 = rsa
+ self.create_user_authorized_file(
+ home_bobby, "authorized_keys2", "rsa", keys
+ )
+ # /tmp/home/bobby/.ssh/user_keys3 = dsa
+ user_keys = self.create_user_authorized_file(
+ home_bobby, "user_keys3", "dsa", keys
+ )
+
+        # /tmp/home/suzie/.ssh/authorized_keys2 = ssh-xmss@openssh.com
+ authorized_keys2 = self.create_user_authorized_file(
+ home_suzie, "authorized_keys2", "ssh-xmss@openssh.com", keys
+ )
+
+        # /tmp/etc/ssh/authorized_keys2 = ecdsa
+ authorized_keys_global = self.create_global_authorized_file(
+ "etc/ssh/authorized_keys2", "ecdsa", keys
+ )
+
+ options = "%s %s %%h/.ssh/authorized_keys2" % (
+ authorized_keys_global,
+ user_keys,
+ )
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(
+ user_bobby, sshd_config, user_keys, keys, delete_keys=False
+ )
+ self.execute_and_check(user_suzie, sshd_config, authorized_keys2, keys)
+
+ @patch("cloudinit.util.get_user_groups")
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_two_users_local_global_files_badguy(
+ self,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ m_get_user_groups,
+ ):
+ keys = {}
+ users = {}
+ mock_permissions = {
+ "/tmp/home/bobby": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh/authorized_keys2": ("bobby", "bobby", 0o600),
+ "/tmp/home/bobby/.ssh/user_keys3": ("bobby", "bobby", 0o600),
+ "/tmp/home/badguy": ("root", "root", 0o755),
+ "/tmp/home/badguy/home": ("root", "root", 0o755),
+ "/tmp/home/badguy/home/bobby": ("root", "root", 0o655),
+ }
+
+ user_bobby = "bobby"
+ user_badguy = "badguy"
+ home_bobby, *_ = self.create_fake_users(
+ [user_bobby, user_badguy],
+ mock_permissions,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ users,
+ )
+ m_get_user_groups.side_effect = mock_get_user_groups
+
+ # /tmp/home/bobby/.ssh/authorized_keys2 = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home_bobby, "authorized_keys2", "rsa", keys
+ )
+ # /tmp/home/bobby/.ssh/user_keys3 = dsa
+ user_keys = self.create_user_authorized_file(
+ home_bobby, "user_keys3", "dsa", keys
+ )
+
+ # /tmp/home/badguy/home/bobby = ""
+ authorized_keys2 = self.tmp_path("home/bobby", dir="/tmp/home/badguy")
+ util.write_file(authorized_keys2, "")
+
+        # /tmp/etc/ssh/authorized_keys2 = ecdsa
+ authorized_keys_global = self.create_global_authorized_file(
+ "etc/ssh/authorized_keys2", "ecdsa", keys
+ )
+
+ # /tmp/sshd_config
+ options = "%s %%h/.ssh/authorized_keys2 %s %s" % (
+ authorized_keys2,
+ authorized_keys_global,
+ user_keys,
+ )
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(
+ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False
+ )
+ self.execute_and_check(
+ user_badguy, sshd_config, authorized_keys2, keys
+ )
+
+ @patch("cloudinit.util.get_user_groups")
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_two_users_unaccessible_file(
+ self,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ m_get_user_groups,
+ ):
+ keys = {}
+ users = {}
+ mock_permissions = {
+ "/tmp/home/bobby": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
+ "/tmp/etc": ("root", "root", 0o755),
+ "/tmp/etc/ssh": ("root", "root", 0o755),
+ "/tmp/etc/ssh/userkeys": ("root", "root", 0o700),
+ "/tmp/etc/ssh/userkeys/bobby": ("bobby", "bobby", 0o600),
+ "/tmp/etc/ssh/userkeys/badguy": ("badguy", "badguy", 0o600),
+ "/tmp/home/badguy": ("badguy", "badguy", 0o700),
+ "/tmp/home/badguy/.ssh": ("badguy", "badguy", 0o700),
+ "/tmp/home/badguy/.ssh/authorized_keys": (
+ "badguy",
+ "badguy",
+ 0o600,
+ ),
+ }
+
+ user_bobby = "bobby"
+ user_badguy = "badguy"
+ homes = self.create_fake_users(
+ [user_bobby, user_badguy],
+ mock_permissions,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ users,
+ )
+ m_get_user_groups.side_effect = mock_get_user_groups
+ home_bobby = homes[0]
+ home_badguy = homes[1]
+
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home_bobby, "authorized_keys", "rsa", keys
+ )
+ # /tmp/etc/ssh/userkeys/bobby = dsa
+ # assume here that we can bypass userkeys, despite permissions
+ self.create_global_authorized_file(
+ "etc/ssh/userkeys/bobby", "dsa", keys
+ )
+
+ # /tmp/home/badguy/.ssh/authorized_keys = ssh-xmss@openssh.com
+ authorized_keys2 = self.create_user_authorized_file(
+ home_badguy, "authorized_keys", "ssh-xmss@openssh.com", keys
+ )
+
+ # /tmp/etc/ssh/userkeys/badguy = ecdsa
+ self.create_global_authorized_file(
+ "etc/ssh/userkeys/badguy", "ecdsa", keys
+ )
+
+ # /tmp/sshd_config
+ options = "/tmp/etc/ssh/userkeys/%u .ssh/authorized_keys"
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(
+ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False
+ )
+ self.execute_and_check(
+ user_badguy, sshd_config, authorized_keys2, keys
+ )
+
+ @patch("cloudinit.util.get_user_groups")
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_two_users_accessible_file(
+ self,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ m_get_user_groups,
+ ):
+ keys = {}
+ users = {}
+ mock_permissions = {
+ "/tmp/home/bobby": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
+ "/tmp/etc": ("root", "root", 0o755),
+ "/tmp/etc/ssh": ("root", "root", 0o755),
+ "/tmp/etc/ssh/userkeys": ("root", "root", 0o755),
+ "/tmp/etc/ssh/userkeys/bobby": ("bobby", "bobby", 0o600),
+ "/tmp/etc/ssh/userkeys/badguy": ("badguy", "badguy", 0o600),
+ "/tmp/home/badguy": ("badguy", "badguy", 0o700),
+ "/tmp/home/badguy/.ssh": ("badguy", "badguy", 0o700),
+ "/tmp/home/badguy/.ssh/authorized_keys": (
+ "badguy",
+ "badguy",
+ 0o600,
+ ),
+ }
+
+ user_bobby = "bobby"
+ user_badguy = "badguy"
+ homes = self.create_fake_users(
+ [user_bobby, user_badguy],
+ mock_permissions,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ users,
+ )
+ m_get_user_groups.side_effect = mock_get_user_groups
+ home_bobby = homes[0]
+ home_badguy = homes[1]
+
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ self.create_user_authorized_file(
+ home_bobby, "authorized_keys", "rsa", keys
+ )
+ # /tmp/etc/ssh/userkeys/bobby = dsa
+ # assume here that we can bypass userkeys, despite permissions
+ authorized_keys = self.create_global_authorized_file(
+ "etc/ssh/userkeys/bobby", "dsa", keys
+ )
+
+ # /tmp/home/badguy/.ssh/authorized_keys = ssh-xmss@openssh.com
+ self.create_user_authorized_file(
+ home_badguy, "authorized_keys", "ssh-xmss@openssh.com", keys
+ )
+
+ # /tmp/etc/ssh/userkeys/badguy = ecdsa
+ authorized_keys2 = self.create_global_authorized_file(
+ "etc/ssh/userkeys/badguy", "ecdsa", keys
+ )
+
+ # /tmp/sshd_config
+ options = "/tmp/etc/ssh/userkeys/%u .ssh/authorized_keys"
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(
+ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False
+ )
+ self.execute_and_check(
+ user_badguy, sshd_config, authorized_keys2, keys
+ )
+
+ @patch("cloudinit.util.get_user_groups")
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_two_users_hardcoded_single_user_file(
+ self,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ m_get_user_groups,
+ ):
+ keys = {}
+ users = {}
+ mock_permissions = {
+ "/tmp/home/bobby": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
+ "/tmp/home/suzie": ("suzie", "suzie", 0o700),
+ "/tmp/home/suzie/.ssh": ("suzie", "suzie", 0o700),
+ "/tmp/home/suzie/.ssh/authorized_keys": ("suzie", "suzie", 0o600),
+ }
+
+ user_bobby = "bobby"
+ user_suzie = "suzie"
+ homes = self.create_fake_users(
+ [user_bobby, user_suzie],
+ mock_permissions,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ users,
+ )
+ home_bobby = homes[0]
+ home_suzie = homes[1]
+ m_get_user_groups.side_effect = mock_get_user_groups
+
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home_bobby, "authorized_keys", "rsa", keys
+ )
+
+ # /tmp/home/suzie/.ssh/authorized_keys = ssh-xmss@openssh.com
+ self.create_user_authorized_file(
+ home_suzie, "authorized_keys", "ssh-xmss@openssh.com", keys
+ )
+
+ # /tmp/sshd_config
+ options = "%s" % (authorized_keys)
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(
+ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False
+ )
+ default = "%s/.ssh/authorized_keys" % home_suzie
+ self.execute_and_check(user_suzie, sshd_config, default, keys)
+
+ @patch("cloudinit.util.get_user_groups")
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_two_users_hardcoded_single_user_file_inverted(
+ self,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ m_get_user_groups,
+ ):
+ keys = {}
+ users = {}
+ mock_permissions = {
+ "/tmp/home/bobby": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
+ "/tmp/home/suzie": ("suzie", "suzie", 0o700),
+ "/tmp/home/suzie/.ssh": ("suzie", "suzie", 0o700),
+ "/tmp/home/suzie/.ssh/authorized_keys": ("suzie", "suzie", 0o600),
+ }
+
+ user_bobby = "bobby"
+ user_suzie = "suzie"
+ homes = self.create_fake_users(
+ [user_bobby, user_suzie],
+ mock_permissions,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ users,
+ )
+ home_bobby = homes[0]
+ home_suzie = homes[1]
+ m_get_user_groups.side_effect = mock_get_user_groups
+
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ self.create_user_authorized_file(
+ home_bobby, "authorized_keys", "rsa", keys
+ )
+
+ # /tmp/home/suzie/.ssh/authorized_keys = ssh-xmss@openssh.com
+ authorized_keys2 = self.create_user_authorized_file(
+ home_suzie, "authorized_keys", "ssh-xmss@openssh.com", keys
+ )
+
+ # /tmp/sshd_config
+ options = "%s" % (authorized_keys2)
+ sshd_config = self.create_sshd_config(options)
+
+ default = "%s/.ssh/authorized_keys" % home_bobby
+ self.execute_and_check(
+ user_bobby, sshd_config, default, keys, delete_keys=False
+ )
+ self.execute_and_check(user_suzie, sshd_config, authorized_keys2, keys)
+
+ @patch("cloudinit.util.get_user_groups")
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ @patch("cloudinit.util.get_permissions")
+ @patch("cloudinit.util.get_owner")
+ @patch("cloudinit.util.get_group")
+ def test_two_users_hardcoded_user_files(
+ self,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ m_get_user_groups,
+ ):
+ keys = {}
+ users = {}
+ mock_permissions = {
+ "/tmp/home/bobby": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
+ "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600),
+ "/tmp/home/suzie": ("suzie", "suzie", 0o700),
+ "/tmp/home/suzie/.ssh": ("suzie", "suzie", 0o700),
+ "/tmp/home/suzie/.ssh/authorized_keys": ("suzie", "suzie", 0o600),
+ }
+
+ user_bobby = "bobby"
+ user_suzie = "suzie"
+ homes = self.create_fake_users(
+ [user_bobby, user_suzie],
+ mock_permissions,
+ m_get_group,
+ m_get_owner,
+ m_get_permissions,
+ m_getpwnam,
+ users,
+ )
+ home_bobby = homes[0]
+ home_suzie = homes[1]
+ m_get_user_groups.side_effect = mock_get_user_groups
+
+ # /tmp/home/bobby/.ssh/authorized_keys = rsa
+ authorized_keys = self.create_user_authorized_file(
+ home_bobby, "authorized_keys", "rsa", keys
+ )
+
+ # /tmp/home/suzie/.ssh/authorized_keys = ssh-xmss@openssh.com
+ authorized_keys2 = self.create_user_authorized_file(
+ home_suzie, "authorized_keys", "ssh-xmss@openssh.com", keys
+ )
+
+ # /tmp/etc/ssh/authorized_keys = ecdsa
+ authorized_keys_global = self.create_global_authorized_file(
+ "etc/ssh/authorized_keys", "ecdsa", keys
+ )
+
+ # /tmp/sshd_config
+ options = "%s %s %s" % (
+ authorized_keys_global,
+ authorized_keys,
+ authorized_keys2,
+ )
+ sshd_config = self.create_sshd_config(options)
+
+ self.execute_and_check(
+ user_bobby, sshd_config, authorized_keys, keys, delete_keys=False
+ )
+ self.execute_and_check(user_suzie, sshd_config, authorized_keys2, keys)
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_stages.py b/tests/unittests/test_stages.py
new file mode 100644
index 00000000..3214410b
--- /dev/null
+++ b/tests/unittests/test_stages.py
@@ -0,0 +1,568 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Tests related to cloudinit.stages module."""
+import os
+import stat
+
+import pytest
+
+from cloudinit import sources, stages
+from cloudinit.event import EventScope, EventType
+from cloudinit.sources import NetworkConfigSource
+from cloudinit.util import write_file
+from tests.unittests.helpers import CiTestCase, mock
+
+TEST_INSTANCE_ID = "i-testing"
+
+
+class FakeDataSource(sources.DataSource):
+ def __init__(
+ self, paths=None, userdata=None, vendordata=None, network_config=""
+ ):
+ super(FakeDataSource, self).__init__({}, None, paths=paths)
+ self.metadata = {"instance-id": TEST_INSTANCE_ID}
+ self.userdata_raw = userdata
+ self.vendordata_raw = vendordata
+ self._network_config = None
+ if network_config: # Permit for None value to setup attribute
+ self._network_config = network_config
+
+ @property
+ def network_config(self):
+ return self._network_config
+
+ def _get_data(self):
+ return True
+
+
+class TestInit(CiTestCase):
+ with_logs = True
+ allowed_subp = False
+
+ def setUp(self):
+ super(TestInit, self).setUp()
+ self.tmpdir = self.tmp_dir()
+ self.init = stages.Init()
+ # Setup fake Paths for Init to reference
+ self.init._cfg = {
+ "system_info": {
+ "distro": "ubuntu",
+ "paths": {"cloud_dir": self.tmpdir, "run_dir": self.tmpdir},
+ }
+ }
+ self.init.datasource = FakeDataSource(paths=self.init.paths)
+ self._real_is_new_instance = self.init.is_new_instance
+ self.init.is_new_instance = mock.Mock(return_value=True)
+
+ def test_wb__find_networking_config_disabled(self):
+ """find_networking_config returns no config when disabled."""
+ disable_file = os.path.join(
+ self.init.paths.get_cpath("data"), "upgraded-network"
+ )
+ write_file(disable_file, "")
+ self.assertEqual(
+ (None, disable_file), self.init._find_networking_config()
+ )
+
+ @mock.patch("cloudinit.stages.cmdline.read_initramfs_config")
+ @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config")
+ def test_wb__find_networking_config_disabled_by_kernel(
+ self, m_cmdline, m_initramfs
+ ):
+ """find_networking_config returns when disabled by kernel cmdline."""
+ m_cmdline.return_value = {"config": "disabled"}
+ m_initramfs.return_value = {"config": ["fake_initrd"]}
+ self.assertEqual(
+ (None, NetworkConfigSource.cmdline),
+ self.init._find_networking_config(),
+ )
+ self.assertEqual(
+ "DEBUG: network config disabled by cmdline\n", self.logs.getvalue()
+ )
+
+ @mock.patch("cloudinit.stages.cmdline.read_initramfs_config")
+ @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config")
+ def test_wb__find_networking_config_disabled_by_initrd(
+ self, m_cmdline, m_initramfs
+ ):
+ """find_networking_config returns when disabled by kernel cmdline."""
+ m_cmdline.return_value = {}
+ m_initramfs.return_value = {"config": "disabled"}
+ self.assertEqual(
+ (None, NetworkConfigSource.initramfs),
+ self.init._find_networking_config(),
+ )
+ self.assertEqual(
+ "DEBUG: network config disabled by initramfs\n",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.stages.cmdline.read_initramfs_config")
+ @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config")
+ def test_wb__find_networking_config_disabled_by_datasrc(
+ self, m_cmdline, m_initramfs
+ ):
+ """find_networking_config returns when disabled by datasource cfg."""
+ m_cmdline.return_value = {} # Kernel doesn't disable networking
+ m_initramfs.return_value = {} # initramfs doesn't disable networking
+ self.init._cfg = {
+ "system_info": {"paths": {"cloud_dir": self.tmpdir}},
+ "network": {},
+ } # system config doesn't disable
+
+ self.init.datasource = FakeDataSource(
+ network_config={"config": "disabled"}
+ )
+ self.assertEqual(
+ (None, NetworkConfigSource.ds), self.init._find_networking_config()
+ )
+ self.assertEqual(
+ "DEBUG: network config disabled by ds\n", self.logs.getvalue()
+ )
+
+ @mock.patch("cloudinit.stages.cmdline.read_initramfs_config")
+ @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config")
+ def test_wb__find_networking_config_disabled_by_sysconfig(
+ self, m_cmdline, m_initramfs
+ ):
+ """find_networking_config returns when disabled by system config."""
+ m_cmdline.return_value = {} # Kernel doesn't disable networking
+ m_initramfs.return_value = {} # initramfs doesn't disable networking
+ self.init._cfg = {
+ "system_info": {"paths": {"cloud_dir": self.tmpdir}},
+ "network": {"config": "disabled"},
+ }
+ self.assertEqual(
+ (None, NetworkConfigSource.system_cfg),
+ self.init._find_networking_config(),
+ )
+ self.assertEqual(
+ "DEBUG: network config disabled by system_cfg\n",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.stages.cmdline.read_initramfs_config")
+ @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config")
+ def test__find_networking_config_uses_datasrc_order(
+ self, m_cmdline, m_initramfs
+ ):
+ """find_networking_config should check sources in DS defined order"""
+ # cmdline and initramfs, which would normally be preferred over other
+ # sources, disable networking; in this case, though, the DS moves them
+ # later so its own config is preferred
+ m_cmdline.return_value = {"config": "disabled"}
+ m_initramfs.return_value = {"config": "disabled"}
+
+ ds_net_cfg = {"config": {"needle": True}}
+ self.init.datasource = FakeDataSource(network_config=ds_net_cfg)
+ self.init.datasource.network_config_sources = [
+ NetworkConfigSource.ds,
+ NetworkConfigSource.system_cfg,
+ NetworkConfigSource.cmdline,
+ NetworkConfigSource.initramfs,
+ ]
+
+ self.assertEqual(
+ (ds_net_cfg, NetworkConfigSource.ds),
+ self.init._find_networking_config(),
+ )
+
+ @mock.patch("cloudinit.stages.cmdline.read_initramfs_config")
+ @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config")
+ def test__find_networking_config_warns_if_datasrc_uses_invalid_src(
+ self, m_cmdline, m_initramfs
+ ):
+ """find_networking_config should check sources in DS defined order"""
+ ds_net_cfg = {"config": {"needle": True}}
+ self.init.datasource = FakeDataSource(network_config=ds_net_cfg)
+ self.init.datasource.network_config_sources = [
+ "invalid_src",
+ NetworkConfigSource.ds,
+ ]
+
+ self.assertEqual(
+ (ds_net_cfg, NetworkConfigSource.ds),
+ self.init._find_networking_config(),
+ )
+ self.assertIn(
+ "WARNING: data source specifies an invalid network"
+ " cfg_source: invalid_src",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.stages.cmdline.read_initramfs_config")
+ @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config")
+ def test__find_networking_config_warns_if_datasrc_uses_unavailable_src(
+ self, m_cmdline, m_initramfs
+ ):
+ """find_networking_config should check sources in DS defined order"""
+ ds_net_cfg = {"config": {"needle": True}}
+ self.init.datasource = FakeDataSource(network_config=ds_net_cfg)
+ self.init.datasource.network_config_sources = [
+ NetworkConfigSource.fallback,
+ NetworkConfigSource.ds,
+ ]
+
+ self.assertEqual(
+ (ds_net_cfg, NetworkConfigSource.ds),
+ self.init._find_networking_config(),
+ )
+ self.assertIn(
+ "WARNING: data source specifies an unavailable network"
+ " cfg_source: fallback",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.stages.cmdline.read_initramfs_config")
+ @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config")
+ def test_wb__find_networking_config_returns_kernel(
+ self, m_cmdline, m_initramfs
+ ):
+ """find_networking_config returns kernel cmdline config if present."""
+ expected_cfg = {"config": ["fakekernel"]}
+ m_cmdline.return_value = expected_cfg
+ m_initramfs.return_value = {"config": ["fake_initrd"]}
+ self.init._cfg = {
+ "system_info": {"paths": {"cloud_dir": self.tmpdir}},
+ "network": {"config": ["fakesys_config"]},
+ }
+ self.init.datasource = FakeDataSource(
+ network_config={"config": ["fakedatasource"]}
+ )
+ self.assertEqual(
+ (expected_cfg, NetworkConfigSource.cmdline),
+ self.init._find_networking_config(),
+ )
+
+ @mock.patch("cloudinit.stages.cmdline.read_initramfs_config")
+ @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config")
+ def test_wb__find_networking_config_returns_initramfs(
+ self, m_cmdline, m_initramfs
+ ):
+ """find_networking_config returns kernel cmdline config if present."""
+ expected_cfg = {"config": ["fake_initrd"]}
+ m_cmdline.return_value = {}
+ m_initramfs.return_value = expected_cfg
+ self.init._cfg = {
+ "system_info": {"paths": {"cloud_dir": self.tmpdir}},
+ "network": {"config": ["fakesys_config"]},
+ }
+ self.init.datasource = FakeDataSource(
+ network_config={"config": ["fakedatasource"]}
+ )
+ self.assertEqual(
+ (expected_cfg, NetworkConfigSource.initramfs),
+ self.init._find_networking_config(),
+ )
+
+ @mock.patch("cloudinit.stages.cmdline.read_initramfs_config")
+ @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config")
+ def test_wb__find_networking_config_returns_system_cfg(
+ self, m_cmdline, m_initramfs
+ ):
+ """find_networking_config returns system config when present."""
+ m_cmdline.return_value = {} # No kernel network config
+ m_initramfs.return_value = {} # no initramfs network config
+ expected_cfg = {"config": ["fakesys_config"]}
+ self.init._cfg = {
+ "system_info": {"paths": {"cloud_dir": self.tmpdir}},
+ "network": expected_cfg,
+ }
+ self.init.datasource = FakeDataSource(
+ network_config={"config": ["fakedatasource"]}
+ )
+ self.assertEqual(
+ (expected_cfg, NetworkConfigSource.system_cfg),
+ self.init._find_networking_config(),
+ )
+
+ @mock.patch("cloudinit.stages.cmdline.read_initramfs_config")
+ @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config")
+ def test_wb__find_networking_config_returns_datasrc_cfg(
+ self, m_cmdline, m_initramfs
+ ):
+ """find_networking_config returns datasource net config if present."""
+ m_cmdline.return_value = {} # No kernel network config
+ m_initramfs.return_value = {} # no initramfs network config
+ # No system config for network in setUp
+ expected_cfg = {"config": ["fakedatasource"]}
+ self.init.datasource = FakeDataSource(network_config=expected_cfg)
+ self.assertEqual(
+ (expected_cfg, NetworkConfigSource.ds),
+ self.init._find_networking_config(),
+ )
+
+ @mock.patch("cloudinit.stages.cmdline.read_initramfs_config")
+ @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config")
+ def test_wb__find_networking_config_returns_fallback(
+ self, m_cmdline, m_initramfs
+ ):
+ """find_networking_config returns fallback config if not defined."""
+ m_cmdline.return_value = {} # Kernel doesn't disable networking
+ m_initramfs.return_value = {} # no initramfs network config
+ # Neither datasource nor system_info disable or provide network
+
+ fake_cfg = {
+ "config": [{"type": "physical", "name": "eth9"}],
+ "version": 1,
+ }
+
+ def fake_generate_fallback():
+ return fake_cfg
+
+ # Monkey patch distro which gets cached on self.init
+ distro = self.init.distro
+ distro.generate_fallback_config = fake_generate_fallback
+ self.assertEqual(
+ (fake_cfg, NetworkConfigSource.fallback),
+ self.init._find_networking_config(),
+ )
+ self.assertNotIn("network config disabled", self.logs.getvalue())
+
+ def test_apply_network_config_disabled(self):
+ """Log when network is disabled by upgraded-network."""
+ disable_file = os.path.join(
+ self.init.paths.get_cpath("data"), "upgraded-network"
+ )
+
+ def fake_network_config():
+ return (None, disable_file)
+
+ self.init._find_networking_config = fake_network_config
+
+ self.init.apply_network_config(True)
+ self.assertIn(
+ "INFO: network config is disabled by %s" % disable_file,
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
+ @mock.patch("cloudinit.distros.ubuntu.Distro")
+ def test_apply_network_on_new_instance(self, m_ubuntu, m_macs):
+ """Call distro apply_network_config methods on is_new_instance."""
+ net_cfg = {
+ "version": 1,
+ "config": [
+ {
+ "subnets": [{"type": "dhcp"}],
+ "type": "physical",
+ "name": "eth9",
+ "mac_address": "42:42:42:42:42:42",
+ }
+ ],
+ }
+
+ def fake_network_config():
+ return net_cfg, NetworkConfigSource.fallback
+
+ m_macs.return_value = {"42:42:42:42:42:42": "eth9"}
+
+ self.init._find_networking_config = fake_network_config
+
+ self.init.apply_network_config(True)
+ self.init.distro.apply_network_config_names.assert_called_with(net_cfg)
+ self.init.distro.apply_network_config.assert_called_with(
+ net_cfg, bring_up=True
+ )
+
+ @mock.patch("cloudinit.distros.ubuntu.Distro")
+ def test_apply_network_on_same_instance_id(self, m_ubuntu):
+ """Only call distro.apply_network_config_names on same instance id."""
+ self.init.is_new_instance = self._real_is_new_instance
+ old_instance_id = os.path.join(
+ self.init.paths.get_cpath("data"), "instance-id"
+ )
+ write_file(old_instance_id, TEST_INSTANCE_ID)
+ net_cfg = {
+ "version": 1,
+ "config": [
+ {
+ "subnets": [{"type": "dhcp"}],
+ "type": "physical",
+ "name": "eth9",
+ "mac_address": "42:42:42:42:42:42",
+ }
+ ],
+ }
+
+ def fake_network_config():
+ return net_cfg, NetworkConfigSource.fallback
+
+ self.init._find_networking_config = fake_network_config
+
+ self.init.apply_network_config(True)
+ self.init.distro.apply_network_config_names.assert_called_with(net_cfg)
+ self.init.distro.apply_network_config.assert_not_called()
+ assert (
+ "No network config applied. Neither a new instance nor datasource "
+ "network update allowed" in self.logs.getvalue()
+ )
+
+ # CiTestCase doesn't work with pytest.mark.parametrize, and moving this
+ # functionality to a separate class is more cumbersome than it'd be worth
+ # at the moment, so use this as a simple setup
+ def _apply_network_setup(self, m_macs):
+ old_instance_id = os.path.join(
+ self.init.paths.get_cpath("data"), "instance-id"
+ )
+ write_file(old_instance_id, TEST_INSTANCE_ID)
+ net_cfg = {
+ "version": 1,
+ "config": [
+ {
+ "subnets": [{"type": "dhcp"}],
+ "type": "physical",
+ "name": "eth9",
+ "mac_address": "42:42:42:42:42:42",
+ }
+ ],
+ }
+
+ def fake_network_config():
+ return net_cfg, NetworkConfigSource.fallback
+
+ m_macs.return_value = {"42:42:42:42:42:42": "eth9"}
+
+ self.init._find_networking_config = fake_network_config
+ self.init.datasource = FakeDataSource(paths=self.init.paths)
+ self.init.is_new_instance = mock.Mock(return_value=False)
+ return net_cfg
+
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
+ @mock.patch("cloudinit.distros.ubuntu.Distro")
+ @mock.patch.dict(
+ sources.DataSource.default_update_events,
+ {EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE, EventType.BOOT}},
+ )
+ def test_apply_network_allowed_when_default_boot(self, m_ubuntu, m_macs):
+ """Apply network if datasource permits BOOT event."""
+ net_cfg = self._apply_network_setup(m_macs)
+
+ self.init.apply_network_config(True)
+ assert (
+ mock.call(net_cfg)
+ == self.init.distro.apply_network_config_names.call_args_list[-1]
+ )
+ assert (
+ mock.call(net_cfg, bring_up=True)
+ == self.init.distro.apply_network_config.call_args_list[-1]
+ )
+
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
+ @mock.patch("cloudinit.distros.ubuntu.Distro")
+ @mock.patch.dict(
+ sources.DataSource.default_update_events,
+ {EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}},
+ )
+ def test_apply_network_disabled_when_no_default_boot(
+ self, m_ubuntu, m_macs
+ ):
+ """Don't apply network if datasource has no BOOT event."""
+ self._apply_network_setup(m_macs)
+ self.init.apply_network_config(True)
+ self.init.distro.apply_network_config.assert_not_called()
+ assert (
+ "No network config applied. Neither a new instance nor datasource "
+ "network update allowed" in self.logs.getvalue()
+ )
+
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
+ @mock.patch("cloudinit.distros.ubuntu.Distro")
+ @mock.patch.dict(
+ sources.DataSource.default_update_events,
+ {EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}},
+ )
+ def test_apply_network_allowed_with_userdata_overrides(
+ self, m_ubuntu, m_macs
+ ):
+ """Apply network if userdata overrides default config"""
+ net_cfg = self._apply_network_setup(m_macs)
+ self.init._cfg = {"updates": {"network": {"when": ["boot"]}}}
+ self.init.apply_network_config(True)
+ self.init.distro.apply_network_config_names.assert_called_with(net_cfg)
+ self.init.distro.apply_network_config.assert_called_with(
+ net_cfg, bring_up=True
+ )
+
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
+ @mock.patch("cloudinit.distros.ubuntu.Distro")
+ @mock.patch.dict(
+ sources.DataSource.supported_update_events,
+ {EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}},
+ )
+ def test_apply_network_disabled_when_unsupported(self, m_ubuntu, m_macs):
+ """Don't apply network config if unsupported.
+
+ Shouldn't work even when specified as userdata
+ """
+ self._apply_network_setup(m_macs)
+
+ self.init._cfg = {"updates": {"network": {"when": ["boot"]}}}
+ self.init.apply_network_config(True)
+ self.init.distro.apply_network_config.assert_not_called()
+ assert (
+ "No network config applied. Neither a new instance nor datasource "
+ "network update allowed" in self.logs.getvalue()
+ )
+
+
+class TestInit_InitializeFilesystem:
+ """Tests for cloudinit.stages.Init._initialize_filesystem.
+
+ TODO: Expand these tests to cover all of _initialize_filesystem's behavior.
+ """
+
+ @pytest.fixture
+ def init(self, paths):
+ """A fixture which yields a stages.Init instance with paths and cfg set
+
+ As it is replaced with a mock, consumers of this fixture can set
+ `init._cfg` if the default empty dict configuration is not appropriate.
+ """
+ with mock.patch("cloudinit.stages.util.ensure_dirs"):
+ init = stages.Init()
+ init._cfg = {}
+ init._paths = paths
+ yield init
+
+ @mock.patch("cloudinit.stages.util.ensure_file")
+ def test_ensure_file_not_called_if_no_log_file_configured(
+ self, m_ensure_file, init
+ ):
+ """If no log file is configured, we should not ensure its existence."""
+ init._cfg = {}
+
+ init._initialize_filesystem()
+
+ assert 0 == m_ensure_file.call_count
+
+ def test_log_files_existence_is_ensured_if_configured(self, init, tmpdir):
+ """If a log file is configured, we should ensure its existence."""
+ log_file = tmpdir.join("cloud-init.log")
+ init._cfg = {"def_log_file": str(log_file)}
+
+ init._initialize_filesystem()
+
+ assert log_file.exists()
+ # Assert we create it 0o640 by default if it doesn't already exist
+ assert 0o640 == stat.S_IMODE(log_file.stat().mode)
+
+ def test_existing_file_permissions_are_not_modified(self, init, tmpdir):
+ """If the log file already exists, we should not modify its permissions
+
+ See https://bugs.launchpad.net/cloud-init/+bug/1900837.
+ """
+ # Use a mode that will never be made the default so this test will
+ # always be valid
+ mode = 0o606
+ log_file = tmpdir.join("cloud-init.log")
+ log_file.ensure()
+ log_file.chmod(mode)
+ init._cfg = {"def_log_file": str(log_file)}
+
+ init._initialize_filesystem()
+
+ assert mode == stat.S_IMODE(log_file.stat().mode)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_subp.py b/tests/unittests/test_subp.py
new file mode 100644
index 00000000..7cd1339b
--- /dev/null
+++ b/tests/unittests/test_subp.py
@@ -0,0 +1,353 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Tests for cloudinit.subp utility functions"""
+
+import json
+import os
+import stat
+import sys
+from unittest import mock
+
+from cloudinit import subp, util
+from tests.unittests.helpers import CiTestCase, get_top_level_dir
+
+BASH = subp.which("bash")
+BOGUS_COMMAND = "this-is-not-expected-to-be-a-program-name"
+
+
+class TestPrependBaseCommands(CiTestCase):
+
+ with_logs = True
+
+ def test_prepend_base_command_errors_on_neither_string_nor_list(self):
+ """Raise an error for each command which is not a string or list."""
+ orig_commands = ["ls", 1, {"not": "gonna work"}, ["basecmd", "list"]]
+ with self.assertRaises(TypeError) as context_manager:
+ subp.prepend_base_command(
+ base_command="basecmd", commands=orig_commands
+ )
+ self.assertEqual(
+ "Invalid basecmd config. These commands are not a string or"
+ " list:\n1\n{'not': 'gonna work'}",
+ str(context_manager.exception),
+ )
+
+ def test_prepend_base_command_warns_on_non_base_string_commands(self):
+ """Warn on each non-base for commands of type string."""
+ orig_commands = [
+ "ls",
+ "basecmd list",
+ "touch /blah",
+ "basecmd install x",
+ ]
+ fixed_commands = subp.prepend_base_command(
+ base_command="basecmd", commands=orig_commands
+ )
+ self.assertEqual(
+ "WARNING: Non-basecmd commands in basecmd config:\n"
+ "ls\ntouch /blah\n",
+ self.logs.getvalue(),
+ )
+ self.assertEqual(orig_commands, fixed_commands)
+
+ def test_prepend_base_command_prepends_on_non_base_list_commands(self):
+ """Prepend 'basecmd' for each non-basecmd command of type list."""
+ orig_commands = [
+ ["ls"],
+ ["basecmd", "list"],
+ ["basecmda", "/blah"],
+ ["basecmd", "install", "x"],
+ ]
+ expected = [
+ ["basecmd", "ls"],
+ ["basecmd", "list"],
+ ["basecmd", "basecmda", "/blah"],
+ ["basecmd", "install", "x"],
+ ]
+ fixed_commands = subp.prepend_base_command(
+ base_command="basecmd", commands=orig_commands
+ )
+ self.assertEqual("", self.logs.getvalue())
+ self.assertEqual(expected, fixed_commands)
+
+ def test_prepend_base_command_removes_first_item_when_none(self):
+ """Remove the first element of a non-basecmd when it is None."""
+ orig_commands = [
+ [None, "ls"],
+ ["basecmd", "list"],
+ [None, "touch", "/blah"],
+ ["basecmd", "install", "x"],
+ ]
+ expected = [
+ ["ls"],
+ ["basecmd", "list"],
+ ["touch", "/blah"],
+ ["basecmd", "install", "x"],
+ ]
+ fixed_commands = subp.prepend_base_command(
+ base_command="basecmd", commands=orig_commands
+ )
+ self.assertEqual("", self.logs.getvalue())
+ self.assertEqual(expected, fixed_commands)
+
+
+class TestSubp(CiTestCase):
+ allowed_subp = [
+ BASH,
+ "cat",
+ CiTestCase.SUBP_SHELL_TRUE,
+ BOGUS_COMMAND,
+ sys.executable,
+ ]
+
+ stdin2err = [BASH, "-c", "cat >&2"]
+ stdin2out = ["cat"]
+ utf8_invalid = b"ab\xaadef"
+ utf8_valid = b"start \xc3\xa9 end"
+ utf8_valid_2 = b"d\xc3\xa9j\xc8\xa7"
+ printenv = [BASH, "-c", 'for n in "$@"; do echo "$n=${!n}"; done', "--"]
+
+ def printf_cmd(self, *args):
+ # bash's printf supports \xaa. So does /usr/bin/printf
+ # but by using bash, we remove dependency on another program.
+ return [BASH, "-c", 'printf "$@"', "printf"] + list(args)
+
+ def test_subp_handles_bytestrings(self):
+ """subp can run a bytestring command if shell is True."""
+ tmp_file = self.tmp_path("test.out")
+ cmd = "echo HI MOM >> {tmp_file}".format(tmp_file=tmp_file)
+ (out, _err) = subp.subp(cmd.encode("utf-8"), shell=True)
+ self.assertEqual("", out)
+ self.assertEqual("", _err)
+ self.assertEqual("HI MOM\n", util.load_file(tmp_file))
+
+ def test_subp_handles_strings(self):
+ """subp can run a string command if shell is True."""
+ tmp_file = self.tmp_path("test.out")
+ cmd = "echo HI MOM >> {tmp_file}".format(tmp_file=tmp_file)
+ (out, _err) = subp.subp(cmd, shell=True)
+ self.assertEqual("", out)
+ self.assertEqual("", _err)
+ self.assertEqual("HI MOM\n", util.load_file(tmp_file))
+
+ def test_subp_handles_utf8(self):
+ # The given bytes contain utf-8 accented characters as seen in e.g.
+ # the "deja dup" package in Ubuntu.
+ cmd = self.printf_cmd(self.utf8_valid_2)
+ (out, _err) = subp.subp(cmd, capture=True)
+ self.assertEqual(out, self.utf8_valid_2.decode("utf-8"))
+
+ def test_subp_respects_decode_false(self):
+ (out, err) = subp.subp(
+ self.stdin2out, capture=True, decode=False, data=self.utf8_valid
+ )
+ self.assertTrue(isinstance(out, bytes))
+ self.assertTrue(isinstance(err, bytes))
+ self.assertEqual(out, self.utf8_valid)
+
+ def test_subp_decode_ignore(self):
+ # this executes a string that writes invalid utf-8 to stdout
+ (out, _err) = subp.subp(
+ self.printf_cmd("abc\\xaadef"), capture=True, decode="ignore"
+ )
+ self.assertEqual(out, "abcdef")
+
+ def test_subp_decode_strict_valid_utf8(self):
+ (out, _err) = subp.subp(
+ self.stdin2out, capture=True, decode="strict", data=self.utf8_valid
+ )
+ self.assertEqual(out, self.utf8_valid.decode("utf-8"))
+
+ def test_subp_decode_invalid_utf8_replaces(self):
+ (out, _err) = subp.subp(
+ self.stdin2out, capture=True, data=self.utf8_invalid
+ )
+ expected = self.utf8_invalid.decode("utf-8", "replace")
+ self.assertEqual(out, expected)
+
+ def test_subp_decode_strict_raises(self):
+ args = []
+ kwargs = {
+ "args": self.stdin2out,
+ "capture": True,
+ "decode": "strict",
+ "data": self.utf8_invalid,
+ }
+ self.assertRaises(UnicodeDecodeError, subp.subp, *args, **kwargs)
+
+ def test_subp_capture_stderr(self):
+ data = b"hello world"
+ (out, err) = subp.subp(
+ self.stdin2err,
+ capture=True,
+ decode=False,
+ data=data,
+ update_env={"LC_ALL": "C"},
+ )
+ self.assertEqual(err, data)
+ self.assertEqual(out, b"")
+
+ def test_subp_reads_env(self):
+ with mock.patch.dict("os.environ", values={"FOO": "BAR"}):
+ out, _err = subp.subp(self.printenv + ["FOO"], capture=True)
+ self.assertEqual("FOO=BAR", out.splitlines()[0])
+
+ def test_subp_env_and_update_env(self):
+ out, _err = subp.subp(
+ self.printenv + ["FOO", "HOME", "K1", "K2"],
+ capture=True,
+ env={"FOO": "BAR"},
+ update_env={"HOME": "/myhome", "K2": "V2"},
+ )
+ self.assertEqual(
+ ["FOO=BAR", "HOME=/myhome", "K1=", "K2=V2"], out.splitlines()
+ )
+
+ def test_subp_update_env(self):
+ extra = {"FOO": "BAR", "HOME": "/root", "K1": "V1"}
+ with mock.patch.dict("os.environ", values=extra):
+ out, _err = subp.subp(
+ self.printenv + ["FOO", "HOME", "K1", "K2"],
+ capture=True,
+ update_env={"HOME": "/myhome", "K2": "V2"},
+ )
+
+ self.assertEqual(
+ ["FOO=BAR", "HOME=/myhome", "K1=V1", "K2=V2"], out.splitlines()
+ )
+
+ def test_subp_warn_missing_shebang(self):
+ """Warn on no #! in script"""
+ noshebang = self.tmp_path("noshebang")
+ util.write_file(noshebang, "true\n")
+
+ print("os is %s" % os)
+ os.chmod(noshebang, os.stat(noshebang).st_mode | stat.S_IEXEC)
+ with self.allow_subp([noshebang]):
+ self.assertRaisesRegex(
+ subp.ProcessExecutionError,
+ r"Missing #! in script\?",
+ subp.subp,
+ (noshebang,),
+ )
+
+ def test_subp_combined_stderr_stdout(self):
+ """Providing combine_capture as True redirects stderr to stdout."""
+ data = b"hello world"
+ (out, err) = subp.subp(
+ self.stdin2err,
+ capture=True,
+ combine_capture=True,
+ decode=False,
+ data=data,
+ )
+ self.assertEqual(b"", err)
+ self.assertEqual(data, out)
+
+ def test_returns_none_if_no_capture(self):
+ (out, err) = subp.subp(self.stdin2out, data=b"", capture=False)
+ self.assertIsNone(err)
+ self.assertIsNone(out)
+
+ def test_exception_has_out_err_are_bytes_if_decode_false(self):
+ """Raised exc should have stderr, stdout as bytes if no decode."""
+ with self.assertRaises(subp.ProcessExecutionError) as cm:
+ subp.subp([BOGUS_COMMAND], decode=False)
+ self.assertTrue(isinstance(cm.exception.stdout, bytes))
+ self.assertTrue(isinstance(cm.exception.stderr, bytes))
+
+ def test_exception_has_out_err_are_bytes_if_decode_true(self):
+ """Raised exc should have stderr, stdout as string if no decode."""
+ with self.assertRaises(subp.ProcessExecutionError) as cm:
+ subp.subp([BOGUS_COMMAND], decode=True)
+ self.assertTrue(isinstance(cm.exception.stdout, str))
+ self.assertTrue(isinstance(cm.exception.stderr, str))
+
+ def test_bunch_of_slashes_in_path(self):
+ self.assertEqual(
+ "/target/my/path/", subp.target_path("/target/", "//my/path/")
+ )
+ self.assertEqual(
+ "/target/my/path/", subp.target_path("/target/", "///my/path/")
+ )
+
+ def test_c_lang_can_take_utf8_args(self):
+ """Independent of system LC_CTYPE, args can contain utf-8 strings.
+
+ When python starts up, its default encoding gets set based on
+ the value of LC_CTYPE. If no system locale is set, the default
+ encoding for both python2 and python3 in some paths will end up
+ being ascii.
+
+ Attempts to use setlocale or patching (or changing) os.environ
+ in the current environment seem to not be effective.
+
+ This test starts up a python with LC_CTYPE set to C so that
+ the default encoding will be set to ascii. In such an environment
+ Popen(['command', 'non-ascii-arg']) would cause a UnicodeDecodeError.
+ """
+ python_prog = "\n".join(
+ [
+ "import json, sys",
+ 'sys.path.insert(0, "{}")'.format(get_top_level_dir()),
+ "from cloudinit.subp import subp",
+ "data = sys.stdin.read()",
+ "cmd = json.loads(data)",
+ "subp(cmd, capture=False)",
+ "",
+ ]
+ )
+ cmd = [
+ BASH,
+ "-c",
+ 'echo -n "$@"',
+ "--",
+ self.utf8_valid.decode("utf-8"),
+ ]
+ python_subp = [sys.executable, "-c", python_prog]
+
+ out, _err = subp.subp(
+ python_subp,
+ update_env={"LC_CTYPE": "C"},
+ data=json.dumps(cmd).encode("utf-8"),
+ decode=False,
+ )
+ self.assertEqual(self.utf8_valid, out)
+
+ def test_bogus_command_logs_status_messages(self):
+ """status_cb gets status messages logs on bogus commands provided."""
+ logs = []
+
+ def status_cb(log):
+ logs.append(log)
+
+ with self.assertRaises(subp.ProcessExecutionError):
+ subp.subp([BOGUS_COMMAND], status_cb=status_cb)
+
+ expected = [
+ "Begin run command: {cmd}\n".format(cmd=BOGUS_COMMAND),
+ "ERROR: End run command: invalid command provided\n",
+ ]
+ self.assertEqual(expected, logs)
+
+ def test_command_logs_exit_codes_to_status_cb(self):
+ """status_cb gets status messages containing command exit code."""
+ logs = []
+
+ def status_cb(log):
+ logs.append(log)
+
+ with self.assertRaises(subp.ProcessExecutionError):
+ subp.subp([BASH, "-c", "exit 2"], status_cb=status_cb)
+ subp.subp([BASH, "-c", "exit 0"], status_cb=status_cb)
+
+ expected = [
+ "Begin run command: %s -c exit 2\n" % BASH,
+ "ERROR: End run command: exit(2)\n",
+ "Begin run command: %s -c exit 0\n" % BASH,
+ "End run command: exit(0)\n",
+ ]
+ self.assertEqual(expected, logs)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_temp_utils.py b/tests/unittests/test_temp_utils.py
new file mode 100644
index 00000000..e91f389b
--- /dev/null
+++ b/tests/unittests/test_temp_utils.py
@@ -0,0 +1,135 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Tests for cloudinit.temp_utils"""
+
+import os
+
+from cloudinit.temp_utils import mkdtemp, mkstemp, tempdir
+from tests.unittests.helpers import CiTestCase, wrap_and_call
+
+
+class TestTempUtils(CiTestCase):
+ def test_mkdtemp_default_non_root(self):
+ """mkdtemp creates a dir under /tmp for the unprivileged."""
+ calls = []
+
+ def fake_mkdtemp(*args, **kwargs):
+ calls.append(kwargs)
+ return "/fake/return/path"
+
+ retval = wrap_and_call(
+ "cloudinit.temp_utils",
+ {
+ "os.getuid": 1000,
+ "tempfile.mkdtemp": {"side_effect": fake_mkdtemp},
+ "_TMPDIR": {"new": None},
+ "os.path.isdir": True,
+ },
+ mkdtemp,
+ )
+ self.assertEqual("/fake/return/path", retval)
+ self.assertEqual([{"dir": "/tmp"}], calls)
+
+ def test_mkdtemp_default_non_root_needs_exe(self):
+ """mkdtemp creates a dir under /var/tmp/cloud-init when needs_exe."""
+ calls = []
+
+ def fake_mkdtemp(*args, **kwargs):
+ calls.append(kwargs)
+ return "/fake/return/path"
+
+ retval = wrap_and_call(
+ "cloudinit.temp_utils",
+ {
+ "os.getuid": 1000,
+ "tempfile.mkdtemp": {"side_effect": fake_mkdtemp},
+ "_TMPDIR": {"new": None},
+ "os.path.isdir": True,
+ },
+ mkdtemp,
+ needs_exe=True,
+ )
+ self.assertEqual("/fake/return/path", retval)
+ self.assertEqual([{"dir": "/var/tmp/cloud-init"}], calls)
+
+ def test_mkdtemp_default_root(self):
+ """mkdtemp creates a dir under /run/cloud-init for the privileged."""
+ calls = []
+
+ def fake_mkdtemp(*args, **kwargs):
+ calls.append(kwargs)
+ return "/fake/return/path"
+
+ retval = wrap_and_call(
+ "cloudinit.temp_utils",
+ {
+ "os.getuid": 0,
+ "tempfile.mkdtemp": {"side_effect": fake_mkdtemp},
+ "_TMPDIR": {"new": None},
+ "os.path.isdir": True,
+ },
+ mkdtemp,
+ )
+ self.assertEqual("/fake/return/path", retval)
+ self.assertEqual([{"dir": "/run/cloud-init/tmp"}], calls)
+
+ def test_mkstemp_default_non_root(self):
+ """mkstemp creates secure tempfile under /tmp for the unprivileged."""
+ calls = []
+
+ def fake_mkstemp(*args, **kwargs):
+ calls.append(kwargs)
+ return "/fake/return/path"
+
+ retval = wrap_and_call(
+ "cloudinit.temp_utils",
+ {
+ "os.getuid": 1000,
+ "tempfile.mkstemp": {"side_effect": fake_mkstemp},
+ "_TMPDIR": {"new": None},
+ "os.path.isdir": True,
+ },
+ mkstemp,
+ )
+ self.assertEqual("/fake/return/path", retval)
+ self.assertEqual([{"dir": "/tmp"}], calls)
+
+ def test_mkstemp_default_root(self):
+ """mkstemp creates a secure tempfile in /run/cloud-init for root."""
+ calls = []
+
+ def fake_mkstemp(*args, **kwargs):
+ calls.append(kwargs)
+ return "/fake/return/path"
+
+ retval = wrap_and_call(
+ "cloudinit.temp_utils",
+ {
+ "os.getuid": 0,
+ "tempfile.mkstemp": {"side_effect": fake_mkstemp},
+ "_TMPDIR": {"new": None},
+ "os.path.isdir": True,
+ },
+ mkstemp,
+ )
+ self.assertEqual("/fake/return/path", retval)
+ self.assertEqual([{"dir": "/run/cloud-init/tmp"}], calls)
+
+ def test_tempdir_error_suppression(self):
+ """test tempdir suppresses errors during directory removal."""
+
+ with self.assertRaises(OSError):
+ with tempdir(prefix="cloud-init-dhcp-") as tdir:
+ os.rmdir(tdir)
+ # As a result, the directory is already gone,
+ # so shutil.rmtree should raise OSError
+
+ with tempdir(
+ rmtree_ignore_errors=True, prefix="cloud-init-dhcp-"
+ ) as tdir:
+ os.rmdir(tdir)
+ # Since the directory is already gone, shutil.rmtree would raise
+ # OSError, but we suppress that
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_templating.py b/tests/unittests/test_templating.py
index cba09830..c1fec27c 100644
--- a/tests/unittests/test_templating.py
+++ b/tests/unittests/test_templating.py
@@ -4,14 +4,15 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.tests import helpers as test_helpers
import textwrap
from cloudinit import templater
from cloudinit.util import load_file, write_file
+from tests.unittests import helpers as test_helpers
try:
import Cheetah
+
HAS_CHEETAH = True
c = Cheetah # make pyflakes and pylint happy, as Cheetah is not used here
except ImportError:
@@ -22,32 +23,36 @@ class TestTemplates(test_helpers.CiTestCase):
with_logs = True
- jinja_utf8 = b'It\xe2\x80\x99s not ascii, {{name}}\n'
- jinja_utf8_rbob = b'It\xe2\x80\x99s not ascii, bob\n'.decode('utf-8')
+ jinja_utf8 = b"It\xe2\x80\x99s not ascii, {{name}}\n"
+ jinja_utf8_rbob = b"It\xe2\x80\x99s not ascii, bob\n".decode("utf-8")
@staticmethod
def add_header(renderer, data):
"""Return text (py2 unicode/py3 str) with template header."""
if isinstance(data, bytes):
- data = data.decode('utf-8')
+ data = data.decode("utf-8")
return "## template: %s\n" % renderer + data
def test_render_basic(self):
- in_data = textwrap.dedent("""
+ in_data = textwrap.dedent(
+ """
${b}
c = d
- """)
+ """
+ )
in_data = in_data.strip()
- expected_data = textwrap.dedent("""
+ expected_data = textwrap.dedent(
+ """
2
c = d
- """)
- out_data = templater.basic_render(in_data, {'b': 2})
+ """
+ )
+ out_data = templater.basic_render(in_data, {"b": 2})
self.assertEqual(expected_data.strip(), out_data)
- @test_helpers.skipIf(not HAS_CHEETAH, 'cheetah renderer not available')
+ @test_helpers.skipIf(not HAS_CHEETAH, "cheetah renderer not available")
def test_detection(self):
blob = "## template:cheetah"
@@ -60,28 +65,28 @@ class TestTemplates(test_helpers.CiTestCase):
self.assertIn("cheetah", template_type)
self.assertEqual(blob, contents)
- blob = '##template:something-new'
+ blob = "##template:something-new"
self.assertRaises(ValueError, templater.detect_template, blob)
def test_render_cheetah(self):
- blob = '''## template:cheetah
-$a,$b'''
+ blob = """## template:cheetah
+$a,$b"""
c = templater.render_string(blob, {"a": 1, "b": 2})
self.assertEqual("1,2", c)
def test_render_jinja(self):
- blob = '''## template:jinja
-{{a}},{{b}}'''
+ blob = """## template:jinja
+{{a}},{{b}}"""
c = templater.render_string(blob, {"a": 1, "b": 2})
self.assertEqual("1,2", c)
def test_render_default(self):
- blob = '''$a,$b'''
+ blob = """$a,$b"""
c = templater.render_string(blob, {"a": 1, "b": 2})
self.assertEqual("1,2", c)
def test_render_basic_deeper(self):
- hn = 'myfoohost.yahoo.com'
+ hn = "myfoohost.yahoo.com"
expected_data = "h=%s\nc=d\n" % hn
in_data = "h=$hostname.canonical_name\nc=d\n"
params = {
@@ -96,59 +101,69 @@ $a,$b'''
hn = "myfoohost"
in_data = "h=$hostname\nc=d\n"
expected_data = "h=%s\nc=d\n" % hn
- out_data = templater.basic_render(in_data, {'hostname': hn})
+ out_data = templater.basic_render(in_data, {"hostname": hn})
self.assertEqual(expected_data, out_data)
def test_render_basic_parens(self):
hn = "myfoohost"
in_data = "h = ${hostname}\nc=d\n"
expected_data = "h = %s\nc=d\n" % hn
- out_data = templater.basic_render(in_data, {'hostname': hn})
+ out_data = templater.basic_render(in_data, {"hostname": hn})
self.assertEqual(expected_data, out_data)
def test_render_basic2(self):
mirror = "mymirror"
codename = "zany"
in_data = "deb $mirror $codename-updates main contrib non-free"
- ex_data = "deb %s %s-updates main contrib non-free" % (mirror,
- codename)
-
- out_data = templater.basic_render(in_data,
- {'mirror': mirror,
- 'codename': codename})
+ ex_data = "deb %s %s-updates main contrib non-free" % (
+ mirror,
+ codename,
+ )
+
+ out_data = templater.basic_render(
+ in_data, {"mirror": mirror, "codename": codename}
+ )
self.assertEqual(ex_data, out_data)
def test_jinja_nonascii_render_to_string(self):
"""Test jinja render_to_string with non-ascii content."""
self.assertEqual(
templater.render_string(
- self.add_header("jinja", self.jinja_utf8), {"name": "bob"}),
- self.jinja_utf8_rbob)
+ self.add_header("jinja", self.jinja_utf8), {"name": "bob"}
+ ),
+ self.jinja_utf8_rbob,
+ )
def test_jinja_nonascii_render_undefined_variables_to_default_py3(self):
"""Test py3 jinja render_to_string with undefined variable default."""
self.assertEqual(
templater.render_string(
- self.add_header("jinja", self.jinja_utf8), {}),
- self.jinja_utf8_rbob.replace('bob', 'CI_MISSING_JINJA_VAR/name'))
+ self.add_header("jinja", self.jinja_utf8), {}
+ ),
+ self.jinja_utf8_rbob.replace("bob", "CI_MISSING_JINJA_VAR/name"),
+ )
def test_jinja_nonascii_render_to_file(self):
"""Test jinja render_to_file of a filename with non-ascii content."""
tmpl_fn = self.tmp_path("j-render-to-file.template")
out_fn = self.tmp_path("j-render-to-file.out")
- write_file(filename=tmpl_fn, omode="wb",
- content=self.add_header(
- "jinja", self.jinja_utf8).encode('utf-8'))
+ write_file(
+ filename=tmpl_fn,
+ omode="wb",
+ content=self.add_header("jinja", self.jinja_utf8).encode("utf-8"),
+ )
templater.render_to_file(tmpl_fn, out_fn, {"name": "bob"})
- result = load_file(out_fn, decode=False).decode('utf-8')
+ result = load_file(out_fn, decode=False).decode("utf-8")
self.assertEqual(result, self.jinja_utf8_rbob)
def test_jinja_nonascii_render_from_file(self):
"""Test jinja render_from_file with non-ascii content."""
tmpl_fn = self.tmp_path("j-render-from-file.template")
- write_file(tmpl_fn, omode="wb",
- content=self.add_header(
- "jinja", self.jinja_utf8).encode('utf-8'))
+ write_file(
+ tmpl_fn,
+ omode="wb",
+ content=self.add_header("jinja", self.jinja_utf8).encode("utf-8"),
+ )
result = templater.render_from_file(tmpl_fn, {"name": "bob"})
self.assertEqual(result, self.jinja_utf8_rbob)
@@ -156,14 +171,18 @@ $a,$b'''
def test_jinja_warns_on_missing_dep_and_uses_basic_renderer(self):
"""Test jinja render_from_file will fallback to basic renderer."""
tmpl_fn = self.tmp_path("j-render-from-file.template")
- write_file(tmpl_fn, omode="wb",
- content=self.add_header(
- "jinja", self.jinja_utf8).encode('utf-8'))
+ write_file(
+ tmpl_fn,
+ omode="wb",
+ content=self.add_header("jinja", self.jinja_utf8).encode("utf-8"),
+ )
result = templater.render_from_file(tmpl_fn, {"name": "bob"})
self.assertEqual(result, self.jinja_utf8.decode())
self.assertIn(
- 'WARNING: Jinja not available as the selected renderer for desired'
- ' template, reverting to the basic renderer.',
- self.logs.getvalue())
+ "WARNING: Jinja not available as the selected renderer for desired"
+ " template, reverting to the basic renderer.",
+ self.logs.getvalue(),
+ )
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_upgrade.py b/tests/unittests/test_upgrade.py
new file mode 100644
index 00000000..d7a721a2
--- /dev/null
+++ b/tests/unittests/test_upgrade.py
@@ -0,0 +1,52 @@
+# Copyright (C) 2020 Canonical Ltd.
+#
+# Author: Daniel Watkins <oddbloke@ubuntu.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Upgrade testing for cloud-init.
+
+This module tests cloud-init's behaviour across upgrades. Specifically, it
+specifies a set of invariants that the current codebase expects to be true (as
+tests in ``TestUpgrade``) and then checks that these hold true after unpickling
+``obj.pkl``s from previous versions of cloud-init; those pickles are stored in
+``tests/data/old_pickles/``.
+"""
+
+import operator
+import pathlib
+
+import pytest
+
+from cloudinit.stages import _pkl_load
+from tests.unittests.helpers import resourceLocation
+
+
+class TestUpgrade:
+ @pytest.fixture(
+ params=pathlib.Path(resourceLocation("old_pickles")).glob("*.pkl"),
+ scope="class",
+ ids=operator.attrgetter("name"),
+ )
+ def previous_obj_pkl(self, request):
+ """Load each pickle to memory once, then run all tests against it.
+
+ Test implementations _must not_ modify the ``previous_obj_pkl`` which
+ they are passed, as that will affect tests that run after them.
+ """
+ return _pkl_load(str(request.param))
+
+ def test_networking_set_on_distro(self, previous_obj_pkl):
+ """We always expect to have ``.networking`` on ``Distro`` objects."""
+ assert previous_obj_pkl.distro.networking is not None
+
+ def test_blacklist_drivers_set_on_networking(self, previous_obj_pkl):
+ """We always expect Networking.blacklist_drivers to be initialised."""
+ assert previous_obj_pkl.distro.networking.blacklist_drivers is None
+
+ def test_paths_has_run_dir_attribute(self, previous_obj_pkl):
+ assert previous_obj_pkl.paths.run_dir is not None
+
+ def test_vendordata_exists(self, previous_obj_pkl):
+ assert previous_obj_pkl.vendordata2 is None
+ assert previous_obj_pkl.vendordata2_raw is None
diff --git a/tests/unittests/test_url_helper.py b/tests/unittests/test_url_helper.py
new file mode 100644
index 00000000..85810e00
--- /dev/null
+++ b/tests/unittests/test_url_helper.py
@@ -0,0 +1,200 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import logging
+
+import httpretty
+import requests
+
+from cloudinit import util, version
+from cloudinit.url_helper import (
+ NOT_FOUND,
+ REDACTED,
+ UrlError,
+ oauth_headers,
+ read_file_or_url,
+ retry_on_url_exc,
+)
+from tests.unittests.helpers import CiTestCase, mock, skipIf
+
+try:
+ import oauthlib
+
+ assert oauthlib # avoid pyflakes error F401: import unused
+ _missing_oauthlib_dep = False
+except ImportError:
+ _missing_oauthlib_dep = True
+
+
+M_PATH = "cloudinit.url_helper."
+
+
+class TestOAuthHeaders(CiTestCase):
+ def test_oauth_headers_raises_not_implemented_when_oathlib_missing(self):
+ """oauth_headers raises a NotImplemented error when oauth absent."""
+ with mock.patch.dict("sys.modules", {"oauthlib": None}):
+ with self.assertRaises(NotImplementedError) as context_manager:
+ oauth_headers(1, 2, 3, 4, 5)
+ self.assertEqual(
+ "oauth support is not available", str(context_manager.exception)
+ )
+
+ @skipIf(_missing_oauthlib_dep, "No python-oauthlib dependency")
+ @mock.patch("oauthlib.oauth1.Client")
+ def test_oauth_headers_calls_oathlibclient_when_available(self, m_client):
+ """oauth_headers calls oaut1.hClient.sign with the provided url."""
+
+ class fakeclient(object):
+ def sign(self, url):
+ # The first and 3rd item of the client.sign tuple are ignored
+ return ("junk", url, "junk2")
+
+ m_client.return_value = fakeclient()
+
+ return_value = oauth_headers(
+ "url",
+ "consumer_key",
+ "token_key",
+ "token_secret",
+ "consumer_secret",
+ )
+ self.assertEqual("url", return_value)
+
+
+class TestReadFileOrUrl(CiTestCase):
+
+ with_logs = True
+
+ def test_read_file_or_url_str_from_file(self):
+ """Test that str(result.contents) on file is text version of contents.
+ It should not be "b'data'", but just "'data'" """
+ tmpf = self.tmp_path("myfile1")
+ data = b"This is my file content\n"
+ util.write_file(tmpf, data, omode="wb")
+ result = read_file_or_url("file://%s" % tmpf)
+ self.assertEqual(result.contents, data)
+ self.assertEqual(str(result), data.decode("utf-8"))
+
+ @httpretty.activate
+ def test_read_file_or_url_str_from_url(self):
+ """Test that str(result.contents) on url is text version of contents.
+ It should not be "b'data'", but just "'data'" """
+ url = "http://hostname/path"
+ data = b"This is my url content\n"
+ httpretty.register_uri(httpretty.GET, url, data)
+ result = read_file_or_url(url)
+ self.assertEqual(result.contents, data)
+ self.assertEqual(str(result), data.decode("utf-8"))
+
+ @httpretty.activate
+ def test_read_file_or_url_str_from_url_redacting_headers_from_logs(self):
+ """Headers are redacted from logs but unredacted in requests."""
+ url = "http://hostname/path"
+ headers = {"sensitive": "sekret", "server": "blah"}
+ httpretty.register_uri(httpretty.GET, url)
+ # By default, httpretty will log our request along with the header,
+ # so if we don't change this the secret will show up in the logs
+ logging.getLogger("httpretty.core").setLevel(logging.CRITICAL)
+
+ read_file_or_url(url, headers=headers, headers_redact=["sensitive"])
+ logs = self.logs.getvalue()
+ for k in headers.keys():
+ self.assertEqual(headers[k], httpretty.last_request().headers[k])
+ self.assertIn(REDACTED, logs)
+ self.assertNotIn("sekret", logs)
+
+ @httpretty.activate
+ def test_read_file_or_url_str_from_url_redacts_noheaders(self):
+ """When no headers_redact, header values are in logs and requests."""
+ url = "http://hostname/path"
+ headers = {"sensitive": "sekret", "server": "blah"}
+ httpretty.register_uri(httpretty.GET, url)
+
+ read_file_or_url(url, headers=headers)
+ for k in headers.keys():
+ self.assertEqual(headers[k], httpretty.last_request().headers[k])
+ logs = self.logs.getvalue()
+ self.assertNotIn(REDACTED, logs)
+ self.assertIn("sekret", logs)
+
+ @mock.patch(M_PATH + "readurl")
+ def test_read_file_or_url_passes_params_to_readurl(self, m_readurl):
+ """read_file_or_url passes all params through to readurl."""
+ url = "http://hostname/path"
+ response = "This is my url content\n"
+ m_readurl.return_value = response
+ params = {
+ "url": url,
+ "timeout": 1,
+ "retries": 2,
+ "headers": {"somehdr": "val"},
+ "data": "data",
+ "sec_between": 1,
+ "ssl_details": {"cert_file": "/path/cert.pem"},
+ "headers_cb": "headers_cb",
+ "exception_cb": "exception_cb",
+ }
+ self.assertEqual(response, read_file_or_url(**params))
+ params.pop("url") # url is passed in as a positional arg
+ self.assertEqual([mock.call(url, **params)], m_readurl.call_args_list)
+
+ def test_wb_read_url_defaults_honored_by_read_file_or_url_callers(self):
+ """Readurl param defaults used when unspecified by read_file_or_url
+
+ Param defaults tested are as follows:
+ retries: 0, additional headers None beyond default, method: GET,
+ data: None, check_status: True and allow_redirects: True
+ """
+ url = "http://hostname/path"
+
+ m_response = mock.MagicMock()
+
+ class FakeSession(requests.Session):
+ @classmethod
+ def request(cls, **kwargs):
+ self.assertEqual(
+ {
+ "url": url,
+ "allow_redirects": True,
+ "method": "GET",
+ "headers": {
+ "User-Agent": "Cloud-Init/%s"
+ % (version.version_string())
+ },
+ },
+ kwargs,
+ )
+ return m_response
+
+ with mock.patch(M_PATH + "requests.Session") as m_session:
+ error = requests.exceptions.HTTPError("broke")
+ m_session.side_effect = [error, FakeSession()]
+ # assert no retries and check_status == True
+ with self.assertRaises(UrlError) as context_manager:
+ response = read_file_or_url(url)
+ self.assertEqual("broke", str(context_manager.exception))
+ # assert default headers, method, url and allow_redirects True
+ # Success on 2nd call with FakeSession
+ response = read_file_or_url(url)
+ self.assertEqual(m_response, response._response)
+
+
+class TestRetryOnUrlExc(CiTestCase):
+ def test_do_not_retry_non_urlerror(self):
+ """When exception is not UrlError return False."""
+ myerror = IOError("something unexcpected")
+ self.assertFalse(retry_on_url_exc(msg="", exc=myerror))
+
+ def test_perform_retries_on_not_found(self):
+ """When exception is UrlError with a 404 status code return True."""
+ myerror = UrlError(
+ cause=RuntimeError("something was not found"), code=NOT_FOUND
+ )
+ self.assertTrue(retry_on_url_exc(msg="", exc=myerror))
+
+ def test_perform_retries_on_timeout(self):
+ """When exception is a requests.Timout return True."""
+ myerror = UrlError(cause=requests.Timeout("something timed out"))
+ self.assertTrue(retry_on_url_exc(msg="", exc=myerror))
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 857629f1..3765511b 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -1,23 +1,1339 @@
# This file is part of cloud-init. See LICENSE file for license information.
+"""Tests for cloudinit.util"""
+
+import base64
import io
+import json
import logging
import os
+import platform
import re
import shutil
import stat
import tempfile
+from textwrap import dedent
+from unittest import mock
+
import pytest
import yaml
-from unittest import mock
-from cloudinit import subp
-from cloudinit import importer, util
-from cloudinit.tests import helpers
+from cloudinit import importer, subp, util
+from tests.unittests import helpers
+from tests.unittests.helpers import CiTestCase
+
+LOG = logging.getLogger(__name__)
+
+MOUNT_INFO = [
+ "68 0 8:3 / / ro,relatime shared:1 - btrfs /dev/sda1 ro,attr2,inode64",
+ "153 68 254:0 / /home rw,relatime shared:101 - xfs /dev/sda2 rw,attr2",
+]
+
+OS_RELEASE_SLES = dedent(
+ """\
+ NAME="SLES"
+ VERSION="12-SP3"
+ VERSION_ID="12.3"
+ PRETTY_NAME="SUSE Linux Enterprise Server 12 SP3"
+ ID="sles"
+ ANSI_COLOR="0;32"
+ CPE_NAME="cpe:/o:suse:sles:12:sp3"
+"""
+)
+
+OS_RELEASE_OPENSUSE = dedent(
+ """\
+ NAME="openSUSE Leap"
+ VERSION="42.3"
+ ID=opensuse
+ ID_LIKE="suse"
+ VERSION_ID="42.3"
+ PRETTY_NAME="openSUSE Leap 42.3"
+ ANSI_COLOR="0;32"
+ CPE_NAME="cpe:/o:opensuse:leap:42.3"
+ BUG_REPORT_URL="https://bugs.opensuse.org"
+ HOME_URL="https://www.opensuse.org/"
+"""
+)
+
+OS_RELEASE_OPENSUSE_L15 = dedent(
+ """\
+ NAME="openSUSE Leap"
+ VERSION="15.0"
+ ID="opensuse-leap"
+ ID_LIKE="suse opensuse"
+ VERSION_ID="15.0"
+ PRETTY_NAME="openSUSE Leap 15.0"
+ ANSI_COLOR="0;32"
+ CPE_NAME="cpe:/o:opensuse:leap:15.0"
+ BUG_REPORT_URL="https://bugs.opensuse.org"
+ HOME_URL="https://www.opensuse.org/"
+"""
+)
+
+OS_RELEASE_OPENSUSE_TW = dedent(
+ """\
+ NAME="openSUSE Tumbleweed"
+ ID="opensuse-tumbleweed"
+ ID_LIKE="opensuse suse"
+ VERSION_ID="20180920"
+ PRETTY_NAME="openSUSE Tumbleweed"
+ ANSI_COLOR="0;32"
+ CPE_NAME="cpe:/o:opensuse:tumbleweed:20180920"
+ BUG_REPORT_URL="https://bugs.opensuse.org"
+ HOME_URL="https://www.opensuse.org/"
+"""
+)
+
+OS_RELEASE_CENTOS = dedent(
+ """\
+ NAME="CentOS Linux"
+ VERSION="7 (Core)"
+ ID="centos"
+ ID_LIKE="rhel fedora"
+ VERSION_ID="7"
+ PRETTY_NAME="CentOS Linux 7 (Core)"
+ ANSI_COLOR="0;31"
+ CPE_NAME="cpe:/o:centos:centos:7"
+ HOME_URL="https://www.centos.org/"
+ BUG_REPORT_URL="https://bugs.centos.org/"
+
+ CENTOS_MANTISBT_PROJECT="CentOS-7"
+ CENTOS_MANTISBT_PROJECT_VERSION="7"
+ REDHAT_SUPPORT_PRODUCT="centos"
+ REDHAT_SUPPORT_PRODUCT_VERSION="7"
+"""
+)
+
+OS_RELEASE_REDHAT_7 = dedent(
+ """\
+ NAME="Red Hat Enterprise Linux Server"
+ VERSION="7.5 (Maipo)"
+ ID="rhel"
+ ID_LIKE="fedora"
+ VARIANT="Server"
+ VARIANT_ID="server"
+ VERSION_ID="7.5"
+ PRETTY_NAME="Red Hat"
+ ANSI_COLOR="0;31"
+ CPE_NAME="cpe:/o:redhat:enterprise_linux:7.5:GA:server"
+ HOME_URL="https://www.redhat.com/"
+ BUG_REPORT_URL="https://bugzilla.redhat.com/"
+
+ REDHAT_BUGZILLA_PRODUCT="Red Hat Enterprise Linux 7"
+ REDHAT_BUGZILLA_PRODUCT_VERSION=7.5
+ REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux"
+ REDHAT_SUPPORT_PRODUCT_VERSION="7.5"
+"""
+)
+
+OS_RELEASE_ALMALINUX_8 = dedent(
+ """\
+ NAME="AlmaLinux"
+ VERSION="8.3 (Purple Manul)"
+ ID="almalinux"
+ ID_LIKE="rhel centos fedora"
+ VERSION_ID="8.3"
+ PLATFORM_ID="platform:el8"
+ PRETTY_NAME="AlmaLinux 8.3 (Purple Manul)"
+ ANSI_COLOR="0;34"
+ CPE_NAME="cpe:/o:almalinux:almalinux:8.3:GA"
+ HOME_URL="https://almalinux.org/"
+ BUG_REPORT_URL="https://bugs.almalinux.org/"
+
+ ALMALINUX_MANTISBT_PROJECT="AlmaLinux-8"
+ ALMALINUX_MANTISBT_PROJECT_VERSION="8.3"
+"""
+)
+
+OS_RELEASE_EUROLINUX_7 = dedent(
+ """\
+ VERSION="7.9 (Minsk)"
+ ID="eurolinux"
+ ID_LIKE="rhel scientific centos fedora"
+ VERSION_ID="7.9"
+ PRETTY_NAME="EuroLinux 7.9 (Minsk)"
+ ANSI_COLOR="0;31"
+ CPE_NAME="cpe:/o:eurolinux:eurolinux:7.9:GA"
+ HOME_URL="http://www.euro-linux.com/"
+ BUG_REPORT_URL="mailto:support@euro-linux.com"
+ REDHAT_BUGZILLA_PRODUCT="EuroLinux 7"
+ REDHAT_BUGZILLA_PRODUCT_VERSION=7.9
+ REDHAT_SUPPORT_PRODUCT="EuroLinux"
+ REDHAT_SUPPORT_PRODUCT_VERSION="7.9"
+"""
+)
+
+OS_RELEASE_EUROLINUX_8 = dedent(
+ """\
+ NAME="EuroLinux"
+ VERSION="8.4 (Vaduz)"
+ ID="eurolinux"
+ ID_LIKE="rhel fedora centos"
+ VERSION_ID="8.4"
+ PLATFORM_ID="platform:el8"
+ PRETTY_NAME="EuroLinux 8.4 (Vaduz)"
+ ANSI_COLOR="0;34"
+ CPE_NAME="cpe:/o:eurolinux:eurolinux:8"
+ HOME_URL="https://www.euro-linux.com/"
+ BUG_REPORT_URL="https://github.com/EuroLinux/eurolinux-distro-bugs-and-rfc/"
+ REDHAT_SUPPORT_PRODUCT="EuroLinux"
+ REDHAT_SUPPORT_PRODUCT_VERSION="8"
+"""
+)
+
+OS_RELEASE_MIRACLELINUX_8 = dedent(
+ """\
+ NAME="MIRACLE LINUX"
+ VERSION="8.4 (Peony)"
+ ID="miraclelinux"
+ ID_LIKE="rhel fedora"
+ PLATFORM_ID="platform:el8"
+ VERSION_ID="8"
+ PRETTY_NAME="MIRACLE LINUX 8.4 (Peony)"
+ ANSI_COLOR="0;31"
+ CPE_NAME="cpe:/o:cybertrust_japan:miracle_linux:8"
+ HOME_URL="https://www.cybertrust.co.jp/miracle-linux/"
+ DOCUMENTATION_URL="https://www.miraclelinux.com/support/miraclelinux8"
+ BUG_REPORT_URL="https://bugzilla.asianux.com/"
+ MIRACLELINUX_SUPPORT_PRODUCT="MIRACLE LINUX"
+ MIRACLELINUX_SUPPORT_PRODUCT_VERSION="8"
+"""
+)
+
+OS_RELEASE_ROCKY_8 = dedent(
+ """\
+ NAME="Rocky Linux"
+ VERSION="8.3 (Green Obsidian)"
+ ID="rocky"
+ ID_LIKE="rhel fedora"
+ VERSION_ID="8.3"
+ PLATFORM_ID="platform:el8"
+ PRETTY_NAME="Rocky Linux 8.3 (Green Obsidian)"
+ ANSI_COLOR="0;31"
+ CPE_NAME="cpe:/o:rocky:rocky:8"
+ HOME_URL="https://rockylinux.org/"
+ BUG_REPORT_URL="https://bugs.rockylinux.org/"
+ ROCKY_SUPPORT_PRODUCT="Rocky Linux"
+ ROCKY_SUPPORT_PRODUCT_VERSION="8"
+"""
+)
+
+OS_RELEASE_VIRTUOZZO_8 = dedent(
+ """\
+ NAME="Virtuozzo Linux"
+ VERSION="8"
+ ID="virtuozzo"
+ ID_LIKE="rhel fedora"
+ VERSION_ID="8"
+ PLATFORM_ID="platform:el8"
+ PRETTY_NAME="Virtuozzo Linux"
+ ANSI_COLOR="0;31"
+ CPE_NAME="cpe:/o:virtuozzoproject:vzlinux:8"
+ HOME_URL="https://www.vzlinux.org"
+ BUG_REPORT_URL="https://bugs.openvz.org"
+"""
+)
+
+OS_RELEASE_CLOUDLINUX_8 = dedent(
+ """\
+ NAME="CloudLinux"
+ VERSION="8.4 (Valery Rozhdestvensky)"
+ ID="cloudlinux"
+ ID_LIKE="rhel fedora centos"
+ VERSION_ID="8.4"
+ PLATFORM_ID="platform:el8"
+ PRETTY_NAME="CloudLinux 8.4 (Valery Rozhdestvensky)"
+ ANSI_COLOR="0;31"
+ CPE_NAME="cpe:/o:cloudlinux:cloudlinux:8.4:GA:server"
+ HOME_URL="https://www.cloudlinux.com/"
+ BUG_REPORT_URL="https://www.cloudlinux.com/support"
+"""
+)
+
+OS_RELEASE_OPENEULER_20 = dedent(
+ """\
+ NAME="openEuler"
+ VERSION="20.03 (LTS-SP2)"
+ ID="openEuler"
+ VERSION_ID="20.03"
+ PRETTY_NAME="openEuler 20.03 (LTS-SP2)"
+ ANSI_COLOR="0;31"
+"""
+)
+
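+# Single-line /etc/redhat-release contents used by the tests that exercise
+# distro detection when no os-release file is present.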
+REDHAT_RELEASE_CENTOS_6 = "CentOS release 6.10 (Final)"
+REDHAT_RELEASE_CENTOS_7 = "CentOS Linux release 7.5.1804 (Core)"
+REDHAT_RELEASE_REDHAT_6 = (
+ "Red Hat Enterprise Linux Server release 6.10 (Santiago)"
+)
+REDHAT_RELEASE_REDHAT_7 = "Red Hat Enterprise Linux Server release 7.5 (Maipo)"
+REDHAT_RELEASE_ALMALINUX_8 = "AlmaLinux release 8.3 (Purple Manul)"
+REDHAT_RELEASE_EUROLINUX_7 = "EuroLinux release 7.9 (Minsk)"
+REDHAT_RELEASE_EUROLINUX_8 = "EuroLinux release 8.4 (Vaduz)"
+REDHAT_RELEASE_MIRACLELINUX_8 = "MIRACLE LINUX release 8.4 (Peony)"
+REDHAT_RELEASE_ROCKY_8 = "Rocky Linux release 8.3 (Green Obsidian)"
+REDHAT_RELEASE_VIRTUOZZO_8 = "Virtuozzo Linux release 8"
+REDHAT_RELEASE_CLOUDLINUX_8 = "CloudLinux release 8.4 (Valery Rozhdestvensky)"
+OS_RELEASE_DEBIAN = dedent(
+ """\
+ PRETTY_NAME="Debian GNU/Linux 9 (stretch)"
+ NAME="Debian GNU/Linux"
+ VERSION_ID="9"
+ VERSION="9 (stretch)"
+ ID=debian
+ HOME_URL="https://www.debian.org/"
+ SUPPORT_URL="https://www.debian.org/support"
+ BUG_REPORT_URL="https://bugs.debian.org/"
+"""
+)
+
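+# The embedded "\n" escapes produce blank lines and the "# comment test" line
+# adds a comment, exercising the os-release parser's tolerance of both.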
+OS_RELEASE_UBUNTU = dedent(
+ """\
+ NAME="Ubuntu"\n
+ # comment test
+ VERSION="16.04.3 LTS (Xenial Xerus)"\n
+ ID=ubuntu\n
+ ID_LIKE=debian\n
+ PRETTY_NAME="Ubuntu 16.04.3 LTS"\n
+ VERSION_ID="16.04"\n
+ HOME_URL="http://www.ubuntu.com/"\n
+ SUPPORT_URL="http://help.ubuntu.com/"\n
+ BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"\n
+ VERSION_CODENAME=xenial\n
+ UBUNTU_CODENAME=xenial\n
+"""
+)
+
+OS_RELEASE_PHOTON = """\
+ NAME="VMware Photon OS"
+ VERSION="4.0"
+ ID=photon
+ VERSION_ID=4.0
+ PRETTY_NAME="VMware Photon OS/Linux"
+ ANSI_COLOR="1;34"
+ HOME_URL="https://vmware.github.io/photon/"
+ BUG_REPORT_URL="https://github.com/vmware/photon/issues"
+"""
+
+
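+# Test double for a cloud datasource: it records every get_hostname() call so
+# tests can assert on the fqdn/metadata_only arguments util passes through.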
+class FakeCloud(object):
+ def __init__(self, hostname, fqdn):
+ self.hostname = hostname
+ self.fqdn = fqdn
+ self.calls = []
+
+ def get_hostname(self, fqdn=None, metadata_only=None):
+ myargs = {}
+ if fqdn is not None:
+ myargs["fqdn"] = fqdn
+ if metadata_only is not None:
+ myargs["metadata_only"] = metadata_only
+ self.calls.append(myargs)
+ if fqdn:
+ return self.fqdn
+ return self.hostname
+
+
+class TestUtil(CiTestCase):
+ def test_parse_mount_info_no_opts_no_arg(self):
+ result = util.parse_mount_info("/home", MOUNT_INFO, LOG)
+ self.assertEqual(("/dev/sda2", "xfs", "/home"), result)
+
+ def test_parse_mount_info_no_opts_arg(self):
+ result = util.parse_mount_info("/home", MOUNT_INFO, LOG, False)
+ self.assertEqual(("/dev/sda2", "xfs", "/home"), result)
+
+ def test_parse_mount_info_with_opts(self):
+ result = util.parse_mount_info("/", MOUNT_INFO, LOG, True)
+ self.assertEqual(("/dev/sda1", "btrfs", "/", "ro,relatime"), result)
+
+ @mock.patch("cloudinit.util.get_mount_info")
+ def test_mount_is_rw(self, m_mount_info):
+ m_mount_info.return_value = ("/dev/sda1", "btrfs", "/", "rw,relatime")
+ is_rw = util.mount_is_read_write("/")
+ self.assertEqual(is_rw, True)
+
+ @mock.patch("cloudinit.util.get_mount_info")
+ def test_mount_is_ro(self, m_mount_info):
+ m_mount_info.return_value = ("/dev/sda1", "btrfs", "/", "ro,relatime")
+ is_rw = util.mount_is_read_write("/")
+ self.assertEqual(is_rw, False)
+
+
+class TestUptime(CiTestCase):
+ @mock.patch("cloudinit.util.boottime")
+ @mock.patch("cloudinit.util.os.path.exists")
+ @mock.patch("cloudinit.util.time.time")
+ def test_uptime_non_linux_path(self, m_time, m_exists, m_boottime):
+ boottime = 1000.0
+ uptime = 10.0
+ m_boottime.return_value = boottime
+ m_time.return_value = boottime + uptime
+ m_exists.return_value = False
+ result = util.uptime()
+ self.assertEqual(str(uptime), result)
+
+
+class TestShellify(CiTestCase):
+ def test_input_dict_raises_type_error(self):
+ self.assertRaisesRegex(
+ TypeError,
+ "Input.*was.*dict.*xpected",
+ util.shellify,
+ {"mykey": "myval"},
+ )
+
+ def test_input_str_raises_type_error(self):
+ self.assertRaisesRegex(
+ TypeError, "Input.*was.*str.*xpected", util.shellify, "foobar"
+ )
+
+ def test_value_with_int_raises_type_error(self):
+ self.assertRaisesRegex(
+ TypeError, "shellify.*int", util.shellify, ["foo", 1]
+ )
-class FakeSelinux(object):
+
+ def test_supports_strings_and_lists(self):
+ self.assertEqual(
+ "\n".join(
+ [
+ "#!/bin/sh",
+ "echo hi mom",
+ "'echo' 'hi dad'",
+ "'echo' 'hi' 'sis'",
+ "",
+ ]
+ ),
+ util.shellify(
+ ["echo hi mom", ["echo", "hi dad"], ("echo", "hi", "sis")]
+ ),
+ )
+
+ def test_supports_comments(self):
+ self.assertEqual(
+ "\n".join(["#!/bin/sh", "echo start", "echo end", ""]),
+ util.shellify(["echo start", None, "echo end"]),
+ )
+
+
+class TestGetHostnameFqdn(CiTestCase):
+ def test_get_hostname_fqdn_from_only_cfg_fqdn(self):
+ """When cfg only has the fqdn key, derive hostname and fqdn from it."""
+ hostname, fqdn = util.get_hostname_fqdn(
+ cfg={"fqdn": "myhost.domain.com"}, cloud=None
+ )
+ self.assertEqual("myhost", hostname)
+ self.assertEqual("myhost.domain.com", fqdn)
+
+ def test_get_hostname_fqdn_from_cfg_fqdn_and_hostname(self):
+ """When cfg has both fqdn and hostname keys, return them."""
+ hostname, fqdn = util.get_hostname_fqdn(
+ cfg={"fqdn": "myhost.domain.com", "hostname": "other"}, cloud=None
+ )
+ self.assertEqual("other", hostname)
+ self.assertEqual("myhost.domain.com", fqdn)
+
+ def test_get_hostname_fqdn_from_cfg_hostname_with_domain(self):
+ """When cfg has only hostname key which represents a fqdn, use that."""
+ hostname, fqdn = util.get_hostname_fqdn(
+ cfg={"hostname": "myhost.domain.com"}, cloud=None
+ )
+ self.assertEqual("myhost", hostname)
+ self.assertEqual("myhost.domain.com", fqdn)
+
+ def test_get_hostname_fqdn_from_cfg_hostname_without_domain(self):
+ """When cfg has a hostname without a '.' query cloud.get_hostname."""
+ mycloud = FakeCloud("cloudhost", "cloudhost.mycloud.com")
+ hostname, fqdn = util.get_hostname_fqdn(
+ cfg={"hostname": "myhost"}, cloud=mycloud
+ )
+ self.assertEqual("myhost", hostname)
+ self.assertEqual("cloudhost.mycloud.com", fqdn)
+ self.assertEqual(
+ [{"fqdn": True, "metadata_only": False}], mycloud.calls
+ )
+
+ def test_get_hostname_fqdn_from_without_fqdn_or_hostname(self):
+ """When cfg has neither hostname nor fqdn cloud.get_hostname."""
+ mycloud = FakeCloud("cloudhost", "cloudhost.mycloud.com")
+ hostname, fqdn = util.get_hostname_fqdn(cfg={}, cloud=mycloud)
+ self.assertEqual("cloudhost", hostname)
+ self.assertEqual("cloudhost.mycloud.com", fqdn)
+ self.assertEqual(
+ [{"fqdn": True, "metadata_only": False}, {"metadata_only": False}],
+ mycloud.calls,
+ )
+
+ def test_get_hostname_fqdn_from_passes_metadata_only_to_cloud(self):
+ """Calls to cloud.get_hostname pass the metadata_only parameter."""
+ mycloud = FakeCloud("cloudhost", "cloudhost.mycloud.com")
+ _hn, _fqdn = util.get_hostname_fqdn(
+ cfg={}, cloud=mycloud, metadata_only=True
+ )
+ self.assertEqual(
+ [{"fqdn": True, "metadata_only": True}, {"metadata_only": True}],
+ mycloud.calls,
+ )
+
+
+class TestBlkid(CiTestCase):
+ ids = {
+ "id01": "1111-1111",
+ "id02": "22222222-2222",
+ "id03": "33333333-3333",
+ "id04": "44444444-4444",
+ "id05": "55555555-5555-5555-5555-555555555555",
+ "id06": "66666666-6666-6666-6666-666666666666",
+ "id07": "52894610484658920398",
+ "id08": "86753098675309867530",
+ "id09": "99999999-9999-9999-9999-999999999999",
+ }
+
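+ # Canned `blkid -o full` output; the long /dev/sda4 entry is split across
+ # two adjacent string literals that Python joins back into a single line.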
+ blkid_out = dedent(
+ """\
+ /dev/loop0: TYPE="squashfs"
+ /dev/loop1: TYPE="squashfs"
+ /dev/loop2: TYPE="squashfs"
+ /dev/loop3: TYPE="squashfs"
+ /dev/sda1: UUID="{id01}" TYPE="vfat" PARTUUID="{id02}"
+ /dev/sda2: UUID="{id03}" TYPE="ext4" PARTUUID="{id04}"
+ /dev/sda3: UUID="{id05}" TYPE="ext4" PARTUUID="{id06}"
+ /dev/sda4: LABEL="default" UUID="{id07}" UUID_SUB="{id08}" """
+ """TYPE="zfs_member" PARTUUID="{id09}"
+ /dev/loop4: TYPE="squashfs"
+ """
+ )
+
+ maxDiff = None
+
+ def _get_expected(self):
+ return {
+ "/dev/loop0": {"DEVNAME": "/dev/loop0", "TYPE": "squashfs"},
+ "/dev/loop1": {"DEVNAME": "/dev/loop1", "TYPE": "squashfs"},
+ "/dev/loop2": {"DEVNAME": "/dev/loop2", "TYPE": "squashfs"},
+ "/dev/loop3": {"DEVNAME": "/dev/loop3", "TYPE": "squashfs"},
+ "/dev/loop4": {"DEVNAME": "/dev/loop4", "TYPE": "squashfs"},
+ "/dev/sda1": {
+ "DEVNAME": "/dev/sda1",
+ "TYPE": "vfat",
+ "UUID": self.ids["id01"],
+ "PARTUUID": self.ids["id02"],
+ },
+ "/dev/sda2": {
+ "DEVNAME": "/dev/sda2",
+ "TYPE": "ext4",
+ "UUID": self.ids["id03"],
+ "PARTUUID": self.ids["id04"],
+ },
+ "/dev/sda3": {
+ "DEVNAME": "/dev/sda3",
+ "TYPE": "ext4",
+ "UUID": self.ids["id05"],
+ "PARTUUID": self.ids["id06"],
+ },
+ "/dev/sda4": {
+ "DEVNAME": "/dev/sda4",
+ "TYPE": "zfs_member",
+ "LABEL": "default",
+ "UUID": self.ids["id07"],
+ "UUID_SUB": self.ids["id08"],
+ "PARTUUID": self.ids["id09"],
+ },
+ }
+
+ @mock.patch("cloudinit.subp.subp")
+ def test_functional_blkid(self, m_subp):
+ m_subp.return_value = (self.blkid_out.format(**self.ids), "")
+ self.assertEqual(self._get_expected(), util.blkid())
+ m_subp.assert_called_with(
+ ["blkid", "-o", "full"], capture=True, decode="replace"
+ )
+
+ @mock.patch("cloudinit.subp.subp")
+ def test_blkid_no_cache_uses_no_cache(self, m_subp):
+ """blkid should turn off cache if disable_cache is true."""
+ m_subp.return_value = (self.blkid_out.format(**self.ids), "")
+ self.assertEqual(self._get_expected(), util.blkid(disable_cache=True))
+ m_subp.assert_called_with(
+ ["blkid", "-o", "full", "-c", "/dev/null"],
+ capture=True,
+ decode="replace",
+ )
+
+
+@mock.patch("cloudinit.subp.subp")
+class TestUdevadmSettle(CiTestCase):
+ def test_with_no_params(self, m_subp):
+ """called with no parameters."""
+ util.udevadm_settle()
+ m_subp.called_once_with(mock.call(["udevadm", "settle"]))
+
+ def test_with_exists_and_not_exists(self, m_subp):
+ """with exists=file where file does not exist should invoke subp."""
+ mydev = self.tmp_path("mydev")
+ util.udevadm_settle(exists=mydev)
+ m_subp.called_once_with(
+ ["udevadm", "settle", "--exit-if-exists=%s" % mydev]
+ )
+
+ def test_with_exists_and_file_exists(self, m_subp):
+ """with exists=file where file does exist should not invoke subp."""
+ mydev = self.tmp_path("mydev")
+ util.write_file(mydev, "foo\n")
+ util.udevadm_settle(exists=mydev)
+ self.assertIsNone(m_subp.call_args)
+
+ def test_with_timeout_int(self, m_subp):
+ """timeout can be an integer."""
+ timeout = 9
+ util.udevadm_settle(timeout=timeout)
+ m_subp.assert_called_once_with(
+ ["udevadm", "settle", "--timeout=%s" % timeout]
+ )
+
+ def test_with_timeout_string(self, m_subp):
+ """timeout can be a string."""
+ timeout = "555"
+ util.udevadm_settle(timeout=timeout)
+ m_subp.assert_called_once_with(
+ ["udevadm", "settle", "--timeout=%s" % timeout]
+ )
+
+ def test_with_exists_and_timeout(self, m_subp):
+ """test call with both exists and timeout."""
+ mydev = self.tmp_path("mydev")
+ timeout = "3"
+ util.udevadm_settle(exists=mydev)
+ m_subp.called_once_with(
+ [
+ "udevadm",
+ "settle",
+ "--exit-if-exists=%s" % mydev,
+ "--timeout=%s" % timeout,
+ ]
+ )
+
+ def test_subp_exception_raises_to_caller(self, m_subp):
+ m_subp.side_effect = subp.ProcessExecutionError("BOOM")
+ self.assertRaises(subp.ProcessExecutionError, util.udevadm_settle)
+
+
+@mock.patch("os.path.exists")
+class TestGetLinuxDistro(CiTestCase):
+ def setUp(self):
+ # python2 has no lru_cache, and therefore, no cache_clear()
+ if hasattr(util.get_linux_distro, "cache_clear"):
+ util.get_linux_distro.cache_clear()
+
+ @classmethod
+ def os_release_exists(cls, path):
+ """Side effect: report only /etc/os-release as present."""
+ if path == "/etc/os-release":
+ return 1
+
+ @classmethod
+ def redhat_release_exists(cls, path):
+ """Side effect: report only /etc/redhat-release as present."""
+ if path == "/etc/redhat-release":
+ return 1
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_distro_quoted_name(self, m_os_release, m_path_exists):
+ """Verify we get the correct name if the os-release file has
+ the distro name in quotes"""
+ m_os_release.return_value = OS_RELEASE_SLES
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("sles", "12.3", platform.machine()), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_distro_bare_name(self, m_os_release, m_path_exists):
+ """Verify we get the correct name if the os-release file does not
+ have the distro name in quotes"""
+ m_os_release.return_value = OS_RELEASE_UBUNTU
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("ubuntu", "16.04", "xenial"), dist)
+
+ @mock.patch("platform.system")
+ @mock.patch("platform.release")
+ @mock.patch("cloudinit.util._parse_redhat_release")
+ def test_get_linux_freebsd(
+ self,
+ m_parse_redhat_release,
+ m_platform_release,
+ m_platform_system,
+ m_path_exists,
+ ):
+ """Verify we get the correct name and release name on FreeBSD."""
+ m_path_exists.return_value = False
+ m_platform_release.return_value = "12.0-RELEASE-p10"
+ m_platform_system.return_value = "FreeBSD"
+ m_parse_redhat_release.return_value = {}
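+ # is_BSD() caches its result, so clear the cache to make sure the
+ # mocked platform.system value above is what gets consulted here.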
+ util.is_BSD.cache_clear()
+ dist = util.get_linux_distro()
+ self.assertEqual(("freebsd", "12.0-RELEASE-p10", ""), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_centos6(self, m_os_release, m_path_exists):
+ """Verify we get the correct name and release name on CentOS 6."""
+ m_os_release.return_value = REDHAT_RELEASE_CENTOS_6
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("centos", "6.10", "Final"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_centos7_redhat_release(self, m_os_release, m_exists):
+ """Verify the correct release info on CentOS 7 without os-release."""
+ m_os_release.return_value = REDHAT_RELEASE_CENTOS_7
+ m_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("centos", "7.5.1804", "Core"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_redhat7_osrelease(self, m_os_release, m_path_exists):
+ """Verify redhat 7 read from os-release."""
+ m_os_release.return_value = OS_RELEASE_REDHAT_7
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("redhat", "7.5", "Maipo"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_redhat7_rhrelease(self, m_os_release, m_path_exists):
+ """Verify redhat 7 read from redhat-release."""
+ m_os_release.return_value = REDHAT_RELEASE_REDHAT_7
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("redhat", "7.5", "Maipo"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_redhat6_rhrelease(self, m_os_release, m_path_exists):
+ """Verify redhat 6 read from redhat-release."""
+ m_os_release.return_value = REDHAT_RELEASE_REDHAT_6
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("redhat", "6.10", "Santiago"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_copr_centos(self, m_os_release, m_path_exists):
+ """Verify we get the correct name and release name on COPR CentOS."""
+ m_os_release.return_value = OS_RELEASE_CENTOS
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("centos", "7", "Core"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_almalinux8_rhrelease(self, m_os_release, m_path_exists):
+ """Verify almalinux 8 read from redhat-release."""
+ m_os_release.return_value = REDHAT_RELEASE_ALMALINUX_8
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("almalinux", "8.3", "Purple Manul"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_almalinux8_osrelease(self, m_os_release, m_path_exists):
+ """Verify almalinux 8 read from os-release."""
+ m_os_release.return_value = OS_RELEASE_ALMALINUX_8
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("almalinux", "8.3", "Purple Manul"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_eurolinux7_rhrelease(self, m_os_release, m_path_exists):
+ """Verify eurolinux 7 read from redhat-release."""
+ m_os_release.return_value = REDHAT_RELEASE_EUROLINUX_7
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("eurolinux", "7.9", "Minsk"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_eurolinux7_osrelease(self, m_os_release, m_path_exists):
+ """Verify eurolinux 7 read from os-release."""
+ m_os_release.return_value = OS_RELEASE_EUROLINUX_7
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("eurolinux", "7.9", "Minsk"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_eurolinux8_rhrelease(self, m_os_release, m_path_exists):
+ """Verify eurolinux 8 read from redhat-release."""
+ m_os_release.return_value = REDHAT_RELEASE_EUROLINUX_8
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("eurolinux", "8.4", "Vaduz"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_eurolinux8_osrelease(self, m_os_release, m_path_exists):
+ """Verify eurolinux 8 read from os-release."""
+ m_os_release.return_value = OS_RELEASE_EUROLINUX_8
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("eurolinux", "8.4", "Vaduz"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_miraclelinux8_rhrelease(
+ self, m_os_release, m_path_exists
+ ):
+ """Verify miraclelinux 8 read from redhat-release."""
+ m_os_release.return_value = REDHAT_RELEASE_MIRACLELINUX_8
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("miracle", "8.4", "Peony"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_miraclelinux8_osrelease(
+ self, m_os_release, m_path_exists
+ ):
+ """Verify miraclelinux 8 read from os-release."""
+ m_os_release.return_value = OS_RELEASE_MIRACLELINUX_8
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("miraclelinux", "8", "Peony"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_rocky8_rhrelease(self, m_os_release, m_path_exists):
+ """Verify rocky linux 8 read from redhat-release."""
+ m_os_release.return_value = REDHAT_RELEASE_ROCKY_8
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("rocky", "8.3", "Green Obsidian"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_rocky8_osrelease(self, m_os_release, m_path_exists):
+ """Verify rocky linux 8 read from os-release."""
+ m_os_release.return_value = OS_RELEASE_ROCKY_8
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("rocky", "8.3", "Green Obsidian"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_virtuozzo8_rhrelease(self, m_os_release, m_path_exists):
+ """Verify virtuozzo linux 8 read from redhat-release."""
+ m_os_release.return_value = REDHAT_RELEASE_VIRTUOZZO_8
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("virtuozzo", "8", "Virtuozzo Linux"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_virtuozzo8_osrelease(self, m_os_release, m_path_exists):
+ """Verify virtuozzo linux 8 read from os-release."""
+ m_os_release.return_value = OS_RELEASE_VIRTUOZZO_8
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("virtuozzo", "8", "Virtuozzo Linux"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_cloud8_rhrelease(self, m_os_release, m_path_exists):
+ """Verify cloudlinux 8 read from redhat-release."""
+ m_os_release.return_value = REDHAT_RELEASE_CLOUDLINUX_8
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("cloudlinux", "8.4", "Valery Rozhdestvensky"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_cloud8_osrelease(self, m_os_release, m_path_exists):
+ """Verify cloudlinux 8 read from os-release."""
+ m_os_release.return_value = OS_RELEASE_CLOUDLINUX_8
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("cloudlinux", "8.4", "Valery Rozhdestvensky"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_debian(self, m_os_release, m_path_exists):
+ """Verify we get the correct name and release name on Debian."""
+ m_os_release.return_value = OS_RELEASE_DEBIAN
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("debian", "9", "stretch"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_openeuler(self, m_os_release, m_path_exists):
+ """Verify get the correct name and release name on Openeuler."""
+ m_os_release.return_value = OS_RELEASE_OPENEULER_20
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("openEuler", "20.03", "LTS-SP2"), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_opensuse(self, m_os_release, m_path_exists):
+ """Verify we get the correct name and machine arch on openSUSE
+ prior to openSUSE Leap 15.
+ """
+ m_os_release.return_value = OS_RELEASE_OPENSUSE
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("opensuse", "42.3", platform.machine()), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_opensuse_l15(self, m_os_release, m_path_exists):
+ """Verify we get the correct name and machine arch on openSUSE
+ for openSUSE Leap 15.0 and later.
+ """
+ m_os_release.return_value = OS_RELEASE_OPENSUSE_L15
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("opensuse-leap", "15.0", platform.machine()), dist)
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_opensuse_tw(self, m_os_release, m_path_exists):
+ """Verify we get the correct name and machine arch on openSUSE
+ for openSUSE Tumbleweed
+ """
+ m_os_release.return_value = OS_RELEASE_OPENSUSE_TW
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(
+ ("opensuse-tumbleweed", "20180920", platform.machine()), dist
+ )
+
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_photon_os_release(self, m_os_release, m_path_exists):
+ """Verify we get the correct name and machine arch on PhotonOS"""
+ m_os_release.return_value = OS_RELEASE_PHOTON
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("photon", "4.0", "VMware Photon OS/Linux"), dist)
+
+ @mock.patch("platform.system")
+ @mock.patch("platform.dist", create=True)
+ def test_get_linux_distro_no_data(
+ self, m_platform_dist, m_platform_system, m_path_exists
+ ):
+ """Verify we get no information if os-release does not exist"""
+ m_platform_dist.return_value = ("", "", "")
+ m_platform_system.return_value = "Linux"
+ m_path_exists.return_value = 0
+ dist = util.get_linux_distro()
+ self.assertEqual(("", "", ""), dist)
+
+ @mock.patch("platform.system")
+ @mock.patch("platform.dist", create=True)
+ def test_get_linux_distro_no_impl(
+ self, m_platform_dist, m_platform_system, m_path_exists
+ ):
+ """Verify we get an empty tuple when no information exists and
+ exceptions are not propagated."""
+ m_platform_dist.side_effect = Exception()
+ m_platform_system.return_value = "Linux"
+ m_path_exists.return_value = 0
+ dist = util.get_linux_distro()
+ self.assertEqual(("", "", ""), dist)
+
+ @mock.patch("platform.system")
+ @mock.patch("platform.dist", create=True)
+ def test_get_linux_distro_plat_data(
+ self, m_platform_dist, m_platform_system, m_path_exists
+ ):
+ """Verify we get the correct platform information"""
+ m_platform_dist.return_value = ("foo", "1.1", "aarch64")
+ m_platform_system.return_value = "Linux"
+ m_path_exists.return_value = 0
+ dist = util.get_linux_distro()
+ self.assertEqual(("foo", "1.1", "aarch64"), dist)
+
+
+class TestGetVariant:
+ @pytest.mark.parametrize(
+ "info, expected_variant",
+ [
+ ({"system": "Linux", "dist": ("almalinux",)}, "almalinux"),
+ ({"system": "linux", "dist": ("alpine",)}, "alpine"),
+ ({"system": "linux", "dist": ("arch",)}, "arch"),
+ ({"system": "linux", "dist": ("centos",)}, "centos"),
+ ({"system": "linux", "dist": ("cloudlinux",)}, "cloudlinux"),
+ ({"system": "linux", "dist": ("debian",)}, "debian"),
+ ({"system": "linux", "dist": ("eurolinux",)}, "eurolinux"),
+ ({"system": "linux", "dist": ("fedora",)}, "fedora"),
+ ({"system": "linux", "dist": ("openEuler",)}, "openeuler"),
+ ({"system": "linux", "dist": ("photon",)}, "photon"),
+ ({"system": "linux", "dist": ("rhel",)}, "rhel"),
+ ({"system": "linux", "dist": ("rocky",)}, "rocky"),
+ ({"system": "linux", "dist": ("suse",)}, "suse"),
+ ({"system": "linux", "dist": ("virtuozzo",)}, "virtuozzo"),
+ ({"system": "linux", "dist": ("ubuntu",)}, "ubuntu"),
+ ({"system": "linux", "dist": ("linuxmint",)}, "ubuntu"),
+ ({"system": "linux", "dist": ("mint",)}, "ubuntu"),
+ ({"system": "linux", "dist": ("redhat",)}, "rhel"),
+ ({"system": "linux", "dist": ("opensuse",)}, "suse"),
+ ({"system": "linux", "dist": ("opensuse-tumbleweed",)}, "suse"),
+ ({"system": "linux", "dist": ("opensuse-leap",)}, "suse"),
+ ({"system": "linux", "dist": ("sles",)}, "suse"),
+ ({"system": "linux", "dist": ("sle_hpc",)}, "suse"),
+ ({"system": "linux", "dist": ("my_distro",)}, "linux"),
+ ({"system": "Windows", "dist": ("dontcare",)}, "windows"),
+ ({"system": "Darwin", "dist": ("dontcare",)}, "darwin"),
+ ({"system": "Freebsd", "dist": ("dontcare",)}, "freebsd"),
+ ({"system": "Netbsd", "dist": ("dontcare",)}, "netbsd"),
+ ({"system": "Openbsd", "dist": ("dontcare",)}, "openbsd"),
+ ({"system": "Dragonfly", "dist": ("dontcare",)}, "dragonfly"),
+ ],
+ )
+ def test_get_variant(self, info, expected_variant):
+ """Verify we get the correct variant name"""
+ assert util._get_variant(info) == expected_variant
+
+
+class TestJsonDumps(CiTestCase):
+ def test_is_str(self):
+ """json_dumps should return a string."""
+ self.assertTrue(isinstance(util.json_dumps({"abc": "123"}), str))
+
+ def test_utf8(self):
+ smiley = "\\ud83d\\ude03"
+ self.assertEqual(
+ {"smiley": smiley}, json.loads(util.json_dumps({"smiley": smiley}))
+ )
+
+ def test_non_utf8(self):
+ blob = b"\xba\x03Qx-#y\xea"
+ self.assertEqual(
+ {"blob": "ci-b64:" + base64.b64encode(blob).decode("utf-8")},
+ json.loads(util.json_dumps({"blob": blob})),
+ )
+
+
+@mock.patch("os.path.exists")
+class TestIsLXD(CiTestCase):
+ def test_is_lxd_true_on_sock_device(self, m_exists):
+ """When lxd's /dev/lxd/sock exists, is_lxd returns true."""
+ m_exists.return_value = True
+ self.assertTrue(util.is_lxd())
+ m_exists.assert_called_once_with("/dev/lxd/sock")
+
+ def test_is_lxd_false_when_sock_device_absent(self, m_exists):
+ """When lxd's /dev/lxd/sock is absent, is_lxd returns false."""
+ m_exists.return_value = False
+ self.assertFalse(util.is_lxd())
+ m_exists.assert_called_once_with("/dev/lxd/sock")
+
+
+class TestReadCcFromCmdline:
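+ # Older pytest releases lack pytest.param, so fall back to a plain tuple
+ # when it is unavailable; the param form only adds a readable test id.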
+ if hasattr(pytest, "param"):
+ random_string = pytest.param(
+ CiTestCase.random_string(), None, id="random_string"
+ )
+ else:
+ random_string = (CiTestCase.random_string(), None)
+
+ @pytest.mark.parametrize(
+ "cmdline,expected_cfg",
+ [
+ # Return None if cmdline has no cc:<YAML>end_cc content.
+ random_string,
+ # Return None if YAML content is empty string.
+ ("foo cc: end_cc bar", None),
+ # Return expected dictionary without trailing end_cc marker.
+ ("foo cc: ssh_pwauth: true", {"ssh_pwauth": True}),
+ # Return expected dictionary w escaped newline and no end_cc.
+ ("foo cc: ssh_pwauth: true\\n", {"ssh_pwauth": True}),
+ # Return expected dictionary of yaml between cc: and end_cc.
+ ("foo cc: ssh_pwauth: true end_cc bar", {"ssh_pwauth": True}),
+ # Return dict with list value w escaped newline, no end_cc.
+ (
+ "cc: ssh_import_id: [smoser, kirkland]\\n",
+ {"ssh_import_id": ["smoser", "kirkland"]},
+ ),
+ # Parse urlencoded brackets in yaml content.
+ (
+ "cc: ssh_import_id: %5Bsmoser, kirkland%5D end_cc",
+ {"ssh_import_id": ["smoser", "kirkland"]},
+ ),
+ # Parse complete urlencoded yaml content.
+ (
+ "cc: ssh_import_id%3A%20%5Buser1%2C%20user2%5D end_cc",
+ {"ssh_import_id": ["user1", "user2"]},
+ ),
+ # Parse nested dictionary in yaml content.
+ (
+ "cc: ntp: {enabled: true, ntp_client: myclient} end_cc",
+ {"ntp": {"enabled": True, "ntp_client": "myclient"}},
+ ),
+ # Parse single mapping value in yaml content.
+ ("cc: ssh_import_id: smoser end_cc", {"ssh_import_id": "smoser"}),
+ # Parse multiline content with multiple mapping and nested lists.
+ (
+ "cc: ssh_import_id: [smoser, bob]\\n"
+ "runcmd: [ [ ls, -l ], echo hi ] end_cc",
+ {
+ "ssh_import_id": ["smoser", "bob"],
+ "runcmd": [["ls", "-l"], "echo hi"],
+ },
+ ),
+ # Parse multiline encoded content w/ mappings and nested lists.
+ (
+ "cc: ssh_import_id: %5Bsmoser, bob%5D\\n"
+ "runcmd: [ [ ls, -l ], echo hi ] end_cc",
+ {
+ "ssh_import_id": ["smoser", "bob"],
+ "runcmd": [["ls", "-l"], "echo hi"],
+ },
+ ),
+ # test encoded escaped newlines work.
+ #
+ # unquote(encoded_content)
+ # 'ssh_import_id: [smoser, bob]\\nruncmd: [ [ ls, -l ], echo hi ]'
+ (
+ (
+ "cc: " + "ssh_import_id%3A%20%5Bsmoser%2C%20bob%5D%5Cn"
+ "runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%2C"
+ "%20echo%20hi%20%5D" + " end_cc"
+ ),
+ {
+ "ssh_import_id": ["smoser", "bob"],
+ "runcmd": [["ls", "-l"], "echo hi"],
+ },
+ ),
+ # test encoded newlines work.
+ #
+ # unquote(encoded_content)
+ # 'ssh_import_id: [smoser, bob]\nruncmd: [ [ ls, -l ], echo hi ]'
+ (
+ (
+ "cc: " + "ssh_import_id%3A%20%5Bsmoser%2C%20bob%5D%0A"
+ "runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%2C"
+ "%20echo%20hi%20%5D" + " end_cc"
+ ),
+ {
+ "ssh_import_id": ["smoser", "bob"],
+ "runcmd": [["ls", "-l"], "echo hi"],
+ },
+ ),
+ # Parse and merge multiple yaml content sections.
+ (
+ "cc:ssh_import_id: [smoser, bob] end_cc "
+ "cc: runcmd: [ [ ls, -l ] ] end_cc",
+ {"ssh_import_id": ["smoser", "bob"], "runcmd": [["ls", "-l"]]},
+ ),
+ # Parse and merge multiple encoded yaml content sections.
+ (
+ "cc:ssh_import_id%3A%20%5Bsmoser%5D end_cc "
+ "cc:runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%20%5D end_cc",
+ {"ssh_import_id": ["smoser"], "runcmd": [["ls", "-l"]]},
+ ),
+ ],
+ )
+ def test_read_conf_from_cmdline_config(self, expected_cfg, cmdline):
+ assert expected_cfg == util.read_conf_from_cmdline(cmdline=cmdline)
+
+
+class TestMountCb:
+ """Tests for ``util.mount_cb``.
+
+ These tests consider the "unit" under test to be ``util.mount_cb`` and
+ ``util.unmounter``, which is only used by ``mount_cb``.
+
+ TODO: Test default mtype determination
+ TODO: Test the if/else branch that actually performs the mounting operation
+ """
+
+ @pytest.fixture
+ def already_mounted_device_and_mountdict(self):
+ """Mock an already-mounted device, and yield (device, mount dict)"""
+ device = "/dev/fake0"
+ mountpoint = "/mnt/fake"
+ with mock.patch("cloudinit.util.subp.subp"):
+ with mock.patch("cloudinit.util.mounts") as m_mounts:
+ mounts = {device: {"mountpoint": mountpoint}}
+ m_mounts.return_value = mounts
+ yield device, mounts[device]
+
+ @pytest.fixture
+ def already_mounted_device(self, already_mounted_device_and_mountdict):
+ """already_mounted_device_and_mountdict, but return only the device"""
+ return already_mounted_device_and_mountdict[0]
+
+ @pytest.mark.parametrize(
+ "mtype,expected",
+ [
+ # While the filesystem is called iso9660, the mount type is cd9660
+ ("iso9660", "cd9660"),
+ # vfat is generally called "msdos" on BSD
+ ("vfat", "msdos"),
+ # judging from man pages, only FreeBSD has this alias
+ ("msdosfs", "msdos"),
+ # Test happy path
+ ("ufs", "ufs"),
+ ],
+ )
+ @mock.patch("cloudinit.util.is_Linux", autospec=True)
+ @mock.patch("cloudinit.util.is_BSD", autospec=True)
+ @mock.patch("cloudinit.util.subp.subp")
+ @mock.patch("cloudinit.temp_utils.tempdir", autospec=True)
+ def test_normalize_mtype_on_bsd(
+ self, m_tmpdir, m_subp, m_is_BSD, m_is_Linux, mtype, expected
+ ):
+ m_is_BSD.return_value = True
+ m_is_Linux.return_value = False
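+ # Make the mocked tempdir context manager yield a fixed mount point so
+ # the expected mount command can be asserted verbatim below.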
+ m_tmpdir.return_value.__enter__ = mock.Mock(
+ autospec=True, return_value="/tmp/fake"
+ )
+ m_tmpdir.return_value.__exit__ = mock.Mock(
+ autospec=True, return_value=True
+ )
+ callback = mock.Mock(autospec=True)
+
+ util.mount_cb("/dev/fake0", callback, mtype=mtype)
+ assert (
+ mock.call(
+ [
+ "mount",
+ "-o",
+ "ro",
+ "-t",
+ expected,
+ "/dev/fake0",
+ "/tmp/fake",
+ ],
+ update_env=None,
+ )
+ in m_subp.call_args_list
+ )
+
+ @pytest.mark.parametrize("invalid_mtype", [int(0), float(0.0), dict()])
+ def test_typeerror_raised_for_invalid_mtype(self, invalid_mtype):
+ with pytest.raises(TypeError):
+ util.mount_cb(mock.Mock(), mock.Mock(), mtype=invalid_mtype)
+
+ @mock.patch("cloudinit.util.subp.subp")
+ def test_already_mounted_does_not_mount_or_umount_anything(
+ self, m_subp, already_mounted_device
+ ):
+ util.mount_cb(already_mounted_device, mock.Mock())
+
+ assert 0 == m_subp.call_count
+
+ @pytest.mark.parametrize("trailing_slash_in_mounts", ["/", ""])
+ def test_already_mounted_calls_callback(
+ self, trailing_slash_in_mounts, already_mounted_device_and_mountdict
+ ):
+ device, mount_dict = already_mounted_device_and_mountdict
+ mountpoint = mount_dict["mountpoint"]
+ mount_dict["mountpoint"] += trailing_slash_in_mounts
+
+ callback = mock.Mock()
+ util.mount_cb(device, callback)
+
+ # The mountpoint passed to callback should always have a trailing
+ # slash, regardless of the input
+ assert [mock.call(mountpoint + "/")] == callback.call_args_list
+
+ def test_already_mounted_calls_callback_with_data(
+ self, already_mounted_device
+ ):
+ callback = mock.Mock()
+ util.mount_cb(
+ already_mounted_device, callback, data=mock.sentinel.data
+ )
+
+ assert [
+ mock.call(mock.ANY, mock.sentinel.data)
+ ] == callback.call_args_list
+
+
+@mock.patch("cloudinit.util.write_file")
+class TestEnsureFile:
+ """Tests for ``cloudinit.util.ensure_file``."""
+
+ def test_parameters_passed_through(self, m_write_file):
+ """Test the parameters in the signature are passed to write_file."""
+ util.ensure_file(
+ mock.sentinel.path,
+ mode=mock.sentinel.mode,
+ preserve_mode=mock.sentinel.preserve_mode,
+ )
+
+ assert 1 == m_write_file.call_count
+ args, kwargs = m_write_file.call_args
+ assert (mock.sentinel.path,) == args
+ assert mock.sentinel.mode == kwargs["mode"]
+ assert mock.sentinel.preserve_mode == kwargs["preserve_mode"]
+
+ @pytest.mark.parametrize(
+ "kwarg,expected",
+ [
+ # Files should be world-readable by default
+ ("mode", 0o644),
+ # The previous behaviour of not preserving mode should be retained
+ ("preserve_mode", False),
+ ],
+ )
+ def test_defaults(self, m_write_file, kwarg, expected):
+ """Test that ensure_file defaults appropriately."""
+ util.ensure_file(mock.sentinel.path)
+
+ assert 1 == m_write_file.call_count
+ _args, kwargs = m_write_file.call_args
+ assert expected == kwargs[kwarg]
+
+ def test_static_parameters_are_passed(self, m_write_file):
+ """Test that the static write_files parameters are passed correctly."""
+ util.ensure_file(mock.sentinel.path)
+
+ assert 1 == m_write_file.call_count
+ _args, kwargs = m_write_file.call_args
+ assert "" == kwargs["content"]
+ assert "ab" == kwargs["omode"]
+
+
+@mock.patch("cloudinit.util.grp.getgrnam")
+@mock.patch("cloudinit.util.os.setgid")
+@mock.patch("cloudinit.util.os.umask")
+class TestRedirectOutputPreexecFn:
+ """This tests specifically the preexec_fn used in redirect_output."""
+
+ @pytest.fixture(params=["outfmt", "errfmt"])
+ def preexec_fn(self, request):
+ """A fixture to gather the preexec_fn used by redirect_output.
+
+ This enables simpler direct testing of it, and parameterises any tests
+ using it to cover both the stdout and stderr code paths.
+ """
+ test_string = "| piped output to invoke subprocess"
+ if request.param == "outfmt":
+ args = (test_string, None)
+ elif request.param == "errfmt":
+ args = (None, test_string)
+ with mock.patch("cloudinit.util.subprocess.Popen") as m_popen:
+ util.redirect_output(*args)
+
+ assert 1 == m_popen.call_count
+ _args, kwargs = m_popen.call_args
+ assert "preexec_fn" in kwargs, "preexec_fn not passed to Popen"
+ return kwargs["preexec_fn"]
+
+ def test_preexec_fn_sets_umask(
+ self, m_os_umask, _m_setgid, _m_getgrnam, preexec_fn
+ ):
+ """preexec_fn should set a mask that avoids world-readable files."""
+ preexec_fn()
+
+ assert [mock.call(0o037)] == m_os_umask.call_args_list
+
+ def test_preexec_fn_sets_group_id_if_adm_group_present(
+ self, _m_os_umask, m_setgid, m_getgrnam, preexec_fn
+ ):
+ """We should setgrp to adm if present, so files are owned by them."""
+ fake_group = mock.Mock(gr_gid=mock.sentinel.gr_gid)
+ m_getgrnam.return_value = fake_group
+
+ preexec_fn()
+
+ assert [mock.call("adm")] == m_getgrnam.call_args_list
+ assert [mock.call(mock.sentinel.gr_gid)] == m_setgid.call_args_list
+
+ def test_preexec_fn_handles_absent_adm_group_gracefully(
+ self, _m_os_umask, m_setgid, m_getgrnam, preexec_fn
+ ):
+ """We should handle an absent adm group gracefully."""
+ m_getgrnam.side_effect = KeyError("getgrnam(): name not found: 'adm'")
+
+ preexec_fn()
+
+ assert 0 == m_setgid.call_count
+
+
+class FakeSelinux(object):
def __init__(self, match_what):
self.match_what = match_what
self.restored = []
@@ -141,7 +1457,7 @@ class TestWriteFile(helpers.TestCase):
path = os.path.join(self.tmp, "NewFile.txt")
contents = "Hey there"
- open(path, 'w').close()
+ open(path, "w").close()
os.chmod(path, 0o666)
util.write_file(path, contents, preserve_mode=True)
@@ -175,15 +1491,16 @@ class TestWriteFile(helpers.TestCase):
fake_se = FakeSelinux(my_file)
- with mock.patch.object(importer, 'import_module',
- return_value=fake_se) as mockobj:
+ with mock.patch.object(
+ importer, "import_module", return_value=fake_se
+ ) as mockobj:
with util.SeLinuxGuard(my_file) as is_on:
self.assertTrue(is_on)
self.assertEqual(1, len(fake_se.restored))
self.assertEqual(my_file, fake_se.restored[0])
- mockobj.assert_called_once_with('selinux')
+ mockobj.assert_called_once_with("selinux")
class TestDeleteDirContents(helpers.TestCase):
@@ -254,15 +1571,16 @@ class TestDeleteDirContents(helpers.TestCase):
class TestKeyValStrings(helpers.TestCase):
def test_keyval_str_to_dict(self):
- expected = {'1': 'one', '2': 'one+one', 'ro': True}
+ expected = {"1": "one", "2": "one+one", "ro": True}
cmdline = "1=one ro 2=one+one"
self.assertEqual(expected, util.keyval_str_to_dict(cmdline))
class TestGetCmdline(helpers.TestCase):
def test_cmdline_reads_debug_env(self):
- with mock.patch.dict("os.environ",
- values={'DEBUG_PROC_CMDLINE': 'abcd 123'}):
+ with mock.patch.dict(
+ "os.environ", values={"DEBUG_PROC_CMDLINE": "abcd 123"}
+ ):
ret = util.get_cmdline()
self.assertEqual("abcd 123", ret)
@@ -272,59 +1590,75 @@ class TestLoadYaml(helpers.CiTestCase):
with_logs = True
def test_simple(self):
- mydata = {'1': "one", '2': "two"}
+ mydata = {"1": "one", "2": "two"}
self.assertEqual(util.load_yaml(yaml.dump(mydata)), mydata)
def test_nonallowed_returns_default(self):
- '''Any unallowed types result in returning default; log the issue.'''
+ """Any unallowed types result in returning default; log the issue."""
# for now, anything not in the allowed list just returns the default.
- myyaml = yaml.dump({'1': "one"})
- self.assertEqual(util.load_yaml(blob=myyaml,
- default=self.mydefault,
- allowed=(str,)),
- self.mydefault)
+ myyaml = yaml.dump({"1": "one"})
+ self.assertEqual(
+ util.load_yaml(
+ blob=myyaml, default=self.mydefault, allowed=(str,)
+ ),
+ self.mydefault,
+ )
regex = re.compile(
- r'Yaml load allows \(<(class|type) \'str\'>,\) root types, but'
- r' got dict')
- self.assertTrue(regex.search(self.logs.getvalue()),
- msg='Missing expected yaml load error')
+ r"Yaml load allows \(<(class|type) \'str\'>,\) root types, but"
+ r" got dict"
+ )
+ self.assertTrue(
+ regex.search(self.logs.getvalue()),
+ msg="Missing expected yaml load error",
+ )
def test_bogus_scan_error_returns_default(self):
- '''On Yaml scan error, load_yaml returns the default and logs issue.'''
+ """On Yaml scan error, load_yaml returns the default and logs issue."""
badyaml = "1\n 2:"
- self.assertEqual(util.load_yaml(blob=badyaml,
- default=self.mydefault),
- self.mydefault)
+ self.assertEqual(
+ util.load_yaml(blob=badyaml, default=self.mydefault),
+ self.mydefault,
+ )
self.assertIn(
- 'Failed loading yaml blob. Invalid format at line 2 column 3:'
+ "Failed loading yaml blob. Invalid format at line 2 column 3:"
' "mapping values are not allowed here',
- self.logs.getvalue())
+ self.logs.getvalue(),
+ )
def test_bogus_parse_error_returns_default(self):
- '''On Yaml parse error, load_yaml returns default and logs issue.'''
+ """On Yaml parse error, load_yaml returns default and logs issue."""
badyaml = "{}}"
- self.assertEqual(util.load_yaml(blob=badyaml,
- default=self.mydefault),
- self.mydefault)
+ self.assertEqual(
+ util.load_yaml(blob=badyaml, default=self.mydefault),
+ self.mydefault,
+ )
self.assertIn(
- 'Failed loading yaml blob. Invalid format at line 1 column 3:'
- " \"expected \'<document start>\', but found \'}\'",
- self.logs.getvalue())
+ "Failed loading yaml blob. Invalid format at line 1 column 3:"
+ " \"expected '<document start>', but found '}'",
+ self.logs.getvalue(),
+ )
def test_unsafe_types(self):
# should not load complex types
- unsafe_yaml = yaml.dump((1, 2, 3,))
- self.assertEqual(util.load_yaml(blob=unsafe_yaml,
- default=self.mydefault),
- self.mydefault)
+ unsafe_yaml = yaml.dump(
+ (
+ 1,
+ 2,
+ 3,
+ )
+ )
+ self.assertEqual(
+ util.load_yaml(blob=unsafe_yaml, default=self.mydefault),
+ self.mydefault,
+ )
def test_python_unicode(self):
# complex type of python/unicode is explicitly allowed
- myobj = {'1': "FOOBAR"}
+ myobj = {"1": "FOOBAR"}
safe_yaml = yaml.dump(myobj)
- self.assertEqual(util.load_yaml(blob=safe_yaml,
- default=self.mydefault),
- myobj)
+ self.assertEqual(
+ util.load_yaml(blob=safe_yaml, default=self.mydefault), myobj
+ )
def test_none_returns_default(self):
"""If yaml.load returns None, then default should be returned."""
@@ -332,168 +1666,177 @@ class TestLoadYaml(helpers.CiTestCase):
mdef = self.mydefault
self.assertEqual(
[(b, self.mydefault) for b in blobs],
- [(b, util.load_yaml(blob=b, default=mdef)) for b in blobs])
+ [(b, util.load_yaml(blob=b, default=mdef)) for b in blobs],
+ )
class TestMountinfoParsing(helpers.ResourceUsingTestCase):
def test_invalid_mountinfo(self):
- line = ("20 1 252:1 / / rw,relatime - ext4 /dev/mapper/vg0-root"
- "rw,errors=remount-ro,data=ordered")
+ line = (
+ "20 1 252:1 / / rw,relatime - ext4 /dev/mapper/vg0-root"
+ "rw,errors=remount-ro,data=ordered"
+ )
elements = line.split()
for i in range(len(elements) + 1):
- lines = [' '.join(elements[0:i])]
+ lines = [" ".join(elements[0:i])]
if i < 10:
expected = None
else:
- expected = ('/dev/mapper/vg0-root', 'ext4', '/')
- self.assertEqual(expected, util.parse_mount_info('/', lines))
+ expected = ("/dev/mapper/vg0-root", "ext4", "/")
+ self.assertEqual(expected, util.parse_mount_info("/", lines))
def test_precise_ext4_root(self):
- lines = helpers.readResource('mountinfo_precise_ext4.txt').splitlines()
+ lines = helpers.readResource("mountinfo_precise_ext4.txt").splitlines()
- expected = ('/dev/mapper/vg0-root', 'ext4', '/')
- self.assertEqual(expected, util.parse_mount_info('/', lines))
- self.assertEqual(expected, util.parse_mount_info('/usr', lines))
- self.assertEqual(expected, util.parse_mount_info('/usr/bin', lines))
+ expected = ("/dev/mapper/vg0-root", "ext4", "/")
+ self.assertEqual(expected, util.parse_mount_info("/", lines))
+ self.assertEqual(expected, util.parse_mount_info("/usr", lines))
+ self.assertEqual(expected, util.parse_mount_info("/usr/bin", lines))
- expected = ('/dev/md0', 'ext4', '/boot')
- self.assertEqual(expected, util.parse_mount_info('/boot', lines))
- self.assertEqual(expected, util.parse_mount_info('/boot/grub', lines))
+ expected = ("/dev/md0", "ext4", "/boot")
+ self.assertEqual(expected, util.parse_mount_info("/boot", lines))
+ self.assertEqual(expected, util.parse_mount_info("/boot/grub", lines))
- expected = ('/dev/mapper/vg0-root', 'ext4', '/')
- self.assertEqual(expected, util.parse_mount_info('/home', lines))
- self.assertEqual(expected, util.parse_mount_info('/home/me', lines))
+ expected = ("/dev/mapper/vg0-root", "ext4", "/")
+ self.assertEqual(expected, util.parse_mount_info("/home", lines))
+ self.assertEqual(expected, util.parse_mount_info("/home/me", lines))
- expected = ('tmpfs', 'tmpfs', '/run')
- self.assertEqual(expected, util.parse_mount_info('/run', lines))
+ expected = ("tmpfs", "tmpfs", "/run")
+ self.assertEqual(expected, util.parse_mount_info("/run", lines))
- expected = ('none', 'tmpfs', '/run/lock')
- self.assertEqual(expected, util.parse_mount_info('/run/lock', lines))
+ expected = ("none", "tmpfs", "/run/lock")
+ self.assertEqual(expected, util.parse_mount_info("/run/lock", lines))
def test_raring_btrfs_root(self):
- lines = helpers.readResource('mountinfo_raring_btrfs.txt').splitlines()
+ lines = helpers.readResource("mountinfo_raring_btrfs.txt").splitlines()
- expected = ('/dev/vda1', 'btrfs', '/')
- self.assertEqual(expected, util.parse_mount_info('/', lines))
- self.assertEqual(expected, util.parse_mount_info('/usr', lines))
- self.assertEqual(expected, util.parse_mount_info('/usr/bin', lines))
- self.assertEqual(expected, util.parse_mount_info('/boot', lines))
- self.assertEqual(expected, util.parse_mount_info('/boot/grub', lines))
+ expected = ("/dev/vda1", "btrfs", "/")
+ self.assertEqual(expected, util.parse_mount_info("/", lines))
+ self.assertEqual(expected, util.parse_mount_info("/usr", lines))
+ self.assertEqual(expected, util.parse_mount_info("/usr/bin", lines))
+ self.assertEqual(expected, util.parse_mount_info("/boot", lines))
+ self.assertEqual(expected, util.parse_mount_info("/boot/grub", lines))
- expected = ('/dev/vda1', 'btrfs', '/home')
- self.assertEqual(expected, util.parse_mount_info('/home', lines))
- self.assertEqual(expected, util.parse_mount_info('/home/me', lines))
+ expected = ("/dev/vda1", "btrfs", "/home")
+ self.assertEqual(expected, util.parse_mount_info("/home", lines))
+ self.assertEqual(expected, util.parse_mount_info("/home/me", lines))
- expected = ('tmpfs', 'tmpfs', '/run')
- self.assertEqual(expected, util.parse_mount_info('/run', lines))
+ expected = ("tmpfs", "tmpfs", "/run")
+ self.assertEqual(expected, util.parse_mount_info("/run", lines))
- expected = ('none', 'tmpfs', '/run/lock')
- self.assertEqual(expected, util.parse_mount_info('/run/lock', lines))
+ expected = ("none", "tmpfs", "/run/lock")
+ self.assertEqual(expected, util.parse_mount_info("/run/lock", lines))
- @mock.patch('cloudinit.util.os')
- @mock.patch('cloudinit.subp.subp')
+ @mock.patch("cloudinit.util.os")
+ @mock.patch("cloudinit.subp.subp")
def test_get_device_info_from_zpool(self, zpool_output, m_os):
# mock /dev/zfs exists
m_os.path.exists.return_value = True
# mock subp command from util.get_mount_info_fs_on_zpool
zpool_output.return_value = (
- helpers.readResource('zpool_status_simple.txt'), ''
+ helpers.readResource("zpool_status_simple.txt"),
+ "",
)
# save function return values and do asserts
- ret = util.get_device_info_from_zpool('vmzroot')
- self.assertEqual('gpt/system', ret)
+ ret = util.get_device_info_from_zpool("vmzroot")
+ self.assertEqual("gpt/system", ret)
self.assertIsNotNone(ret)
- m_os.path.exists.assert_called_with('/dev/zfs')
+ m_os.path.exists.assert_called_with("/dev/zfs")
- @mock.patch('cloudinit.util.os')
+ @mock.patch("cloudinit.util.os")
def test_get_device_info_from_zpool_no_dev_zfs(self, m_os):
# mock /dev/zfs missing
m_os.path.exists.return_value = False
# save function return values and do asserts
- ret = util.get_device_info_from_zpool('vmzroot')
+ ret = util.get_device_info_from_zpool("vmzroot")
self.assertIsNone(ret)
- @mock.patch('cloudinit.util.os')
- @mock.patch('cloudinit.subp.subp')
+ @mock.patch("cloudinit.util.os")
+ @mock.patch("cloudinit.subp.subp")
def test_get_device_info_from_zpool_handles_no_zpool(self, m_sub, m_os):
"""Handle case where there is no zpool command"""
# mock /dev/zfs exists
m_os.path.exists.return_value = True
m_sub.side_effect = subp.ProcessExecutionError("No zpool cmd")
- ret = util.get_device_info_from_zpool('vmzroot')
+ ret = util.get_device_info_from_zpool("vmzroot")
self.assertIsNone(ret)
- @mock.patch('cloudinit.util.os')
- @mock.patch('cloudinit.subp.subp')
+ @mock.patch("cloudinit.util.os")
+ @mock.patch("cloudinit.subp.subp")
def test_get_device_info_from_zpool_on_error(self, zpool_output, m_os):
# mock /dev/zfs exists
m_os.path.exists.return_value = True
# mock subp command from util.get_mount_info_fs_on_zpool
zpool_output.return_value = (
- helpers.readResource('zpool_status_simple.txt'), 'error'
+ helpers.readResource("zpool_status_simple.txt"),
+ "error",
)
# save function return values and do asserts
- ret = util.get_device_info_from_zpool('vmzroot')
+ ret = util.get_device_info_from_zpool("vmzroot")
self.assertIsNone(ret)
- @mock.patch('cloudinit.subp.subp')
+ @mock.patch("cloudinit.subp.subp")
def test_parse_mount_with_ext(self, mount_out):
mount_out.return_value = (
- helpers.readResource('mount_parse_ext.txt'), '')
+ helpers.readResource("mount_parse_ext.txt"),
+ "",
+ )
# this one is valid and exists in mount_parse_ext.txt
- ret = util.parse_mount('/var')
- self.assertEqual(('/dev/mapper/vg00-lv_var', 'ext4', '/var'), ret)
+ ret = util.parse_mount("/var")
+ self.assertEqual(("/dev/mapper/vg00-lv_var", "ext4", "/var"), ret)
# another one that is valid and exists
- ret = util.parse_mount('/')
- self.assertEqual(('/dev/mapper/vg00-lv_root', 'ext4', '/'), ret)
+ ret = util.parse_mount("/")
+ self.assertEqual(("/dev/mapper/vg00-lv_root", "ext4", "/"), ret)
# this one exists in mount_parse_ext.txt
- ret = util.parse_mount('/sys/kernel/debug')
+ ret = util.parse_mount("/sys/kernel/debug")
self.assertIsNone(ret)
# this one does not even exist in mount_parse_ext.txt
- ret = util.parse_mount('/not/existing/mount')
+ ret = util.parse_mount("/not/existing/mount")
self.assertIsNone(ret)
- @mock.patch('cloudinit.subp.subp')
+ @mock.patch("cloudinit.subp.subp")
def test_parse_mount_with_zfs(self, mount_out):
mount_out.return_value = (
- helpers.readResource('mount_parse_zfs.txt'), '')
+ helpers.readResource("mount_parse_zfs.txt"),
+ "",
+ )
# this one is valid and exists in mount_parse_zfs.txt
- ret = util.parse_mount('/var')
- self.assertEqual(('vmzroot/ROOT/freebsd/var', 'zfs', '/var'), ret)
+ ret = util.parse_mount("/var")
+ self.assertEqual(("vmzroot/ROOT/freebsd/var", "zfs", "/var"), ret)
# this one is the root, valid and also exists in mount_parse_zfs.txt
- ret = util.parse_mount('/')
- self.assertEqual(('vmzroot/ROOT/freebsd', 'zfs', '/'), ret)
+ ret = util.parse_mount("/")
+ self.assertEqual(("vmzroot/ROOT/freebsd", "zfs", "/"), ret)
# this one does not even exist in mount_parse_ext.txt
- ret = util.parse_mount('/not/existing/mount')
+ ret = util.parse_mount("/not/existing/mount")
self.assertIsNone(ret)
class TestIsX86(helpers.CiTestCase):
-
def test_is_x86_matches_x86_types(self):
"""is_x86 returns True if CPU architecture matches."""
- matched_arches = ['x86_64', 'i386', 'i586', 'i686']
+ matched_arches = ["x86_64", "i386", "i586", "i686"]
for arch in matched_arches:
self.assertTrue(
- util.is_x86(arch), 'Expected is_x86 for arch "%s"' % arch)
+ util.is_x86(arch), 'Expected is_x86 for arch "%s"' % arch
+ )
def test_is_x86_unmatched_types(self):
"""is_x86 returns Fale on non-intel x86 architectures."""
- unmatched_arches = ['ia64', '9000/800', 'arm64v71']
+ unmatched_arches = ["ia64", "9000/800", "arm64v71"]
for arch in unmatched_arches:
self.assertFalse(
- util.is_x86(arch), 'Expected not is_x86 for arch "%s"' % arch)
+ util.is_x86(arch), 'Expected not is_x86 for arch "%s"' % arch
+ )
- @mock.patch('cloudinit.util.os.uname')
+ @mock.patch("cloudinit.util.os.uname")
def test_is_x86_calls_uname_for_architecture(self, m_uname):
"""is_x86 returns True if platform from uname matches."""
- m_uname.return_value = [0, 1, 2, 3, 'x86_64']
+ m_uname.return_value = [0, 1, 2, 3, "x86_64"]
self.assertTrue(util.is_x86())
class TestGetConfigLogfiles(helpers.CiTestCase):
-
def test_empty_cfg_returns_empty_list(self):
"""An empty config passed to get_config_logfiles returns empty list."""
self.assertEqual([], util.get_config_logfiles(None))
@@ -502,39 +1845,56 @@ class TestGetConfigLogfiles(helpers.CiTestCase):
def test_default_log_file_present(self):
"""When default_log_file is set get_config_logfiles finds it."""
self.assertEqual(
- ['/my.log'],
- util.get_config_logfiles({'def_log_file': '/my.log'}))
+ ["/my.log"], util.get_config_logfiles({"def_log_file": "/my.log"})
+ )
def test_output_logs_parsed_when_teeing_files(self):
"""When output configuration is parsed when teeing files."""
self.assertEqual(
- ['/himom.log', '/my.log'],
- sorted(util.get_config_logfiles({
- 'def_log_file': '/my.log',
- 'output': {'all': '|tee -a /himom.log'}})))
+ ["/himom.log", "/my.log"],
+ sorted(
+ util.get_config_logfiles(
+ {
+ "def_log_file": "/my.log",
+ "output": {"all": "|tee -a /himom.log"},
+ }
+ )
+ ),
+ )
def test_output_logs_parsed_when_redirecting(self):
"""When output configuration is parsed when redirecting to a file."""
self.assertEqual(
- ['/my.log', '/test.log'],
- sorted(util.get_config_logfiles({
- 'def_log_file': '/my.log',
- 'output': {'all': '>/test.log'}})))
+ ["/my.log", "/test.log"],
+ sorted(
+ util.get_config_logfiles(
+ {
+ "def_log_file": "/my.log",
+ "output": {"all": ">/test.log"},
+ }
+ )
+ ),
+ )
def test_output_logs_parsed_when_appending(self):
"""When output configuration is parsed when appending to a file."""
self.assertEqual(
- ['/my.log', '/test.log'],
- sorted(util.get_config_logfiles({
- 'def_log_file': '/my.log',
- 'output': {'all': '>> /test.log'}})))
+ ["/my.log", "/test.log"],
+ sorted(
+ util.get_config_logfiles(
+ {
+ "def_log_file": "/my.log",
+ "output": {"all": ">> /test.log"},
+ }
+ )
+ ),
+ )
class TestMultiLog(helpers.FilesystemMockingTestCase):
-
def _createConsole(self, root):
- os.mkdir(os.path.join(root, 'dev'))
- open(os.path.join(root, 'dev', 'console'), 'a').close()
+ os.mkdir(os.path.join(root, "dev"))
+ open(os.path.join(root, "dev", "console"), "a").close()
def setUp(self):
super(TestMultiLog, self).setUp()
@@ -548,60 +1908,64 @@ class TestMultiLog(helpers.FilesystemMockingTestCase):
self.patchStdoutAndStderr(self.stdout, self.stderr)
def test_stderr_used_by_default(self):
- logged_string = 'test stderr output'
+ logged_string = "test stderr output"
util.multi_log(logged_string)
self.assertEqual(logged_string, self.stderr.getvalue())
def test_stderr_not_used_if_false(self):
- util.multi_log('should not see this', stderr=False)
- self.assertEqual('', self.stderr.getvalue())
+ util.multi_log("should not see this", stderr=False)
+ self.assertEqual("", self.stderr.getvalue())
def test_logs_go_to_console_by_default(self):
self._createConsole(self.root)
- logged_string = 'something very important'
+ logged_string = "something very important"
util.multi_log(logged_string)
- self.assertEqual(logged_string, open('/dev/console').read())
+ self.assertEqual(logged_string, open("/dev/console").read())
def test_logs_dont_go_to_stdout_if_console_exists(self):
self._createConsole(self.root)
- util.multi_log('something')
- self.assertEqual('', self.stdout.getvalue())
+ util.multi_log("something")
+ self.assertEqual("", self.stdout.getvalue())
def test_logs_go_to_stdout_if_console_does_not_exist(self):
- logged_string = 'something very important'
+ logged_string = "something very important"
util.multi_log(logged_string)
self.assertEqual(logged_string, self.stdout.getvalue())
+ def test_logs_dont_go_to_stdout_if_fallback_to_stdout_is_false(self):
+ util.multi_log("something", fallback_to_stdout=False)
+ self.assertEqual("", self.stdout.getvalue())
+
def test_logs_go_to_log_if_given(self):
log = mock.MagicMock()
- logged_string = 'something very important'
+ logged_string = "something very important"
util.multi_log(logged_string, log=log)
- self.assertEqual([((mock.ANY, logged_string), {})],
- log.log.call_args_list)
+ self.assertEqual(
+ [((mock.ANY, logged_string), {})], log.log.call_args_list
+ )
def test_newlines_stripped_from_log_call(self):
log = mock.MagicMock()
- expected_string = 'something very important'
- util.multi_log('{0}\n'.format(expected_string), log=log)
+ expected_string = "something very important"
+ util.multi_log("{0}\n".format(expected_string), log=log)
self.assertEqual((mock.ANY, expected_string), log.log.call_args[0])
def test_log_level_defaults_to_debug(self):
log = mock.MagicMock()
- util.multi_log('message', log=log)
+ util.multi_log("message", log=log)
self.assertEqual((logging.DEBUG, mock.ANY), log.log.call_args[0])
def test_given_log_level_used(self):
log = mock.MagicMock()
log_level = mock.Mock()
- util.multi_log('message', log=log, log_level=log_level)
+ util.multi_log("message", log=log, log_level=log_level)
self.assertEqual((log_level, mock.ANY), log.log.call_args[0])
class TestMessageFromString(helpers.TestCase):
-
def test_unicode_not_messed_up(self):
- roundtripped = util.message_from_string(u'\n').as_string()
- self.assertNotIn('\x00', roundtripped)
+ roundtripped = util.message_from_string("\n").as_string()
+ self.assertNotIn("\x00", roundtripped)
class TestReadSeeded(helpers.TestCase):
@@ -614,12 +1978,13 @@ class TestReadSeeded(helpers.TestCase):
ud = b"userdatablob"
vd = b"vendordatablob"
helpers.populate_dir(
- self.tmp, {'meta-data': "key1: val1", 'user-data': ud,
- 'vendor-data': vd})
+ self.tmp,
+ {"meta-data": "key1: val1", "user-data": ud, "vendor-data": vd},
+ )
sdir = self.tmp + os.path.sep
(found_md, found_ud, found_vd) = util.read_seeded(sdir)
- self.assertEqual(found_md, {'key1': 'val1'})
+ self.assertEqual(found_md, {"key1": "val1"})
self.assertEqual(found_ud, ud)
self.assertEqual(found_vd, vd)
@@ -634,157 +1999,189 @@ class TestReadSeededWithoutVendorData(helpers.TestCase):
ud = b"userdatablob"
vd = None
helpers.populate_dir(
- self.tmp, {'meta-data': "key1: val1", 'user-data': ud})
+ self.tmp, {"meta-data": "key1: val1", "user-data": ud}
+ )
sdir = self.tmp + os.path.sep
(found_md, found_ud, found_vd) = util.read_seeded(sdir)
- self.assertEqual(found_md, {'key1': 'val1'})
+ self.assertEqual(found_md, {"key1": "val1"})
self.assertEqual(found_ud, ud)
self.assertEqual(found_vd, vd)
class TestEncode(helpers.TestCase):
"""Test the encoding functions"""
+
def test_decode_binary_plain_text_with_hex(self):
- blob = 'BOOTABLE_FLAG=\x80init=/bin/systemd'
+ blob = "BOOTABLE_FLAG=\x80init=/bin/systemd"
text = util.decode_binary(blob)
self.assertEqual(text, blob)
class TestProcessExecutionError(helpers.TestCase):
- template = ('{description}\n'
- 'Command: {cmd}\n'
- 'Exit code: {exit_code}\n'
- 'Reason: {reason}\n'
- 'Stdout: {stdout}\n'
- 'Stderr: {stderr}')
- empty_attr = '-'
- empty_description = 'Unexpected error while running command.'
+ template = (
+ "{description}\n"
+ "Command: {cmd}\n"
+ "Exit code: {exit_code}\n"
+ "Reason: {reason}\n"
+ "Stdout: {stdout}\n"
+ "Stderr: {stderr}"
+ )
+ empty_attr = "-"
+ empty_description = "Unexpected error while running command."
def test_pexec_error_indent_text(self):
error = subp.ProcessExecutionError()
- msg = 'abc\ndef'
- formatted = 'abc\n{0}def'.format(' ' * 4)
+ msg = "abc\ndef"
+ formatted = "abc\n{0}def".format(" " * 4)
self.assertEqual(error._indent_text(msg, indent_level=4), formatted)
- self.assertEqual(error._indent_text(msg.encode(), indent_level=4),
- formatted.encode())
+ self.assertEqual(
+ error._indent_text(msg.encode(), indent_level=4),
+ formatted.encode(),
+ )
self.assertIsInstance(
- error._indent_text(msg.encode()), type(msg.encode()))
+ error._indent_text(msg.encode()), type(msg.encode())
+ )
def test_pexec_error_type(self):
self.assertIsInstance(subp.ProcessExecutionError(), IOError)
def test_pexec_error_empty_msgs(self):
error = subp.ProcessExecutionError()
- self.assertTrue(all(attr == self.empty_attr for attr in
- (error.stderr, error.stdout, error.reason)))
+ self.assertTrue(
+ all(
+ attr == self.empty_attr
+ for attr in (error.stderr, error.stdout, error.reason)
+ )
+ )
self.assertEqual(error.description, self.empty_description)
- self.assertEqual(str(error), self.template.format(
- description=self.empty_description, exit_code=self.empty_attr,
- reason=self.empty_attr, stdout=self.empty_attr,
- stderr=self.empty_attr, cmd=self.empty_attr))
+ self.assertEqual(
+ str(error),
+ self.template.format(
+ description=self.empty_description,
+ exit_code=self.empty_attr,
+ reason=self.empty_attr,
+ stdout=self.empty_attr,
+ stderr=self.empty_attr,
+ cmd=self.empty_attr,
+ ),
+ )
def test_pexec_error_single_line_msgs(self):
- stdout_msg = 'out out'
- stderr_msg = 'error error'
- cmd = 'test command'
+ stdout_msg = "out out"
+ stderr_msg = "error error"
+ cmd = "test command"
exit_code = 3
error = subp.ProcessExecutionError(
- stdout=stdout_msg, stderr=stderr_msg, exit_code=3, cmd=cmd)
- self.assertEqual(str(error), self.template.format(
- description=self.empty_description, stdout=stdout_msg,
- stderr=stderr_msg, exit_code=str(exit_code),
- reason=self.empty_attr, cmd=cmd))
+ stdout=stdout_msg, stderr=stderr_msg, exit_code=3, cmd=cmd
+ )
+ self.assertEqual(
+ str(error),
+ self.template.format(
+ description=self.empty_description,
+ stdout=stdout_msg,
+ stderr=stderr_msg,
+ exit_code=str(exit_code),
+ reason=self.empty_attr,
+ cmd=cmd,
+ ),
+ )
def test_pexec_error_multi_line_msgs(self):
        # make sure bytes are converted and handled properly when formatting
- stdout_msg = 'multi\nline\noutput message'.encode()
- stderr_msg = 'multi\nline\nerror message\n\n\n'
+ stdout_msg = "multi\nline\noutput message".encode()
+ stderr_msg = "multi\nline\nerror message\n\n\n"
error = subp.ProcessExecutionError(
- stdout=stdout_msg, stderr=stderr_msg)
+ stdout=stdout_msg, stderr=stderr_msg
+ )
self.assertEqual(
str(error),
- '\n'.join((
- '{description}',
- 'Command: {empty_attr}',
- 'Exit code: {empty_attr}',
- 'Reason: {empty_attr}',
- 'Stdout: multi',
- ' line',
- ' output message',
- 'Stderr: multi',
- ' line',
- ' error message',
- )).format(description=self.empty_description,
- empty_attr=self.empty_attr))
+ "\n".join(
+ (
+ "{description}",
+ "Command: {empty_attr}",
+ "Exit code: {empty_attr}",
+ "Reason: {empty_attr}",
+ "Stdout: multi",
+ " line",
+ " output message",
+ "Stderr: multi",
+ " line",
+ " error message",
+ )
+ ).format(
+ description=self.empty_description, empty_attr=self.empty_attr
+ ),
+ )
class TestSystemIsSnappy(helpers.FilesystemMockingTestCase):
def test_id_in_os_release_quoted(self):
"""os-release containing ID="ubuntu-core" is snappy."""
- orcontent = '\n'.join(['ID="ubuntu-core"', ''])
+ orcontent = "\n".join(['ID="ubuntu-core"', ""])
root_d = self.tmp_dir()
- helpers.populate_dir(root_d, {'etc/os-release': orcontent})
+ helpers.populate_dir(root_d, {"etc/os-release": orcontent})
self.reRoot(root_d)
self.assertTrue(util.system_is_snappy())
def test_id_in_os_release(self):
"""os-release containing ID=ubuntu-core is snappy."""
- orcontent = '\n'.join(['ID=ubuntu-core', ''])
+ orcontent = "\n".join(["ID=ubuntu-core", ""])
root_d = self.tmp_dir()
- helpers.populate_dir(root_d, {'etc/os-release': orcontent})
+ helpers.populate_dir(root_d, {"etc/os-release": orcontent})
self.reRoot(root_d)
self.assertTrue(util.system_is_snappy())
- @mock.patch('cloudinit.util.get_cmdline')
+ @mock.patch("cloudinit.util.get_cmdline")
def test_bad_content_in_os_release_no_effect(self, m_cmdline):
"""malformed os-release should not raise exception."""
- m_cmdline.return_value = 'root=/dev/sda'
- orcontent = '\n'.join(['IDubuntu-core', ''])
+ m_cmdline.return_value = "root=/dev/sda"
+ orcontent = "\n".join(["IDubuntu-core", ""])
root_d = self.tmp_dir()
- helpers.populate_dir(root_d, {'etc/os-release': orcontent})
+ helpers.populate_dir(root_d, {"etc/os-release": orcontent})
self.reRoot()
self.assertFalse(util.system_is_snappy())
- @mock.patch('cloudinit.util.get_cmdline')
+ @mock.patch("cloudinit.util.get_cmdline")
def test_snap_core_in_cmdline_is_snappy(self, m_cmdline):
"""The string snap_core= in kernel cmdline indicates snappy."""
cmdline = (
"BOOT_IMAGE=(loop)/kernel.img root=LABEL=writable "
"snap_core=core_x1.snap snap_kernel=pc-kernel_x1.snap ro "
"net.ifnames=0 init=/lib/systemd/systemd console=tty1 "
- "console=ttyS0 panic=-1")
+ "console=ttyS0 panic=-1"
+ )
m_cmdline.return_value = cmdline
self.assertTrue(util.system_is_snappy())
self.assertTrue(m_cmdline.call_count > 0)
- @mock.patch('cloudinit.util.get_cmdline')
+ @mock.patch("cloudinit.util.get_cmdline")
def test_nothing_found_is_not_snappy(self, m_cmdline):
"""If no positive identification, then not snappy."""
- m_cmdline.return_value = 'root=/dev/sda'
+ m_cmdline.return_value = "root=/dev/sda"
self.reRoot()
self.assertFalse(util.system_is_snappy())
self.assertTrue(m_cmdline.call_count > 0)
- @mock.patch('cloudinit.util.get_cmdline')
+ @mock.patch("cloudinit.util.get_cmdline")
def test_channel_ini_with_snappy_is_snappy(self, m_cmdline):
"""A Channel.ini file with 'ubuntu-core' indicates snappy."""
- m_cmdline.return_value = 'root=/dev/sda'
+ m_cmdline.return_value = "root=/dev/sda"
root_d = self.tmp_dir()
- content = '\n'.join(["[Foo]", "source = 'ubuntu-core'", ""])
- helpers.populate_dir(
- root_d, {'etc/system-image/channel.ini': content})
+ content = "\n".join(["[Foo]", "source = 'ubuntu-core'", ""])
+ helpers.populate_dir(root_d, {"etc/system-image/channel.ini": content})
self.reRoot(root_d)
self.assertTrue(util.system_is_snappy())
- @mock.patch('cloudinit.util.get_cmdline')
+ @mock.patch("cloudinit.util.get_cmdline")
def test_system_image_config_dir_is_snappy(self, m_cmdline):
"""Existence of /etc/system-image/config.d indicates snappy."""
- m_cmdline.return_value = 'root=/dev/sda'
+ m_cmdline.return_value = "root=/dev/sda"
root_d = self.tmp_dir()
helpers.populate_dir(
- root_d, {'etc/system-image/config.d/my.file': "_unused"})
+ root_d, {"etc/system-image/config.d/my.file": "_unused"}
+ )
self.reRoot(root_d)
self.assertTrue(util.system_is_snappy())
@@ -793,41 +2190,52 @@ class TestLoadShellContent(helpers.TestCase):
def test_comments_handled_correctly(self):
"""Shell comments should be allowed in the content."""
self.assertEqual(
- {'key1': 'val1', 'key2': 'val2', 'key3': 'val3 #tricky'},
- util.load_shell_content('\n'.join([
- "#top of file comment",
- "key1=val1 #this is a comment",
- "# second comment",
- 'key2="val2" # inlin comment'
- '#badkey=wark',
- 'key3="val3 #tricky"',
- ''])))
+ {"key1": "val1", "key2": "val2", "key3": "val3 #tricky"},
+ util.load_shell_content(
+ "\n".join(
+ [
+ "#top of file comment",
+ "key1=val1 #this is a comment",
+ "# second comment",
+ 'key2="val2" # inlin comment#badkey=wark',
+ 'key3="val3 #tricky"',
+ "",
+ ]
+ )
+ ),
+ )
class TestGetProcEnv(helpers.TestCase):
"""test get_proc_env."""
- null = b'\x00'
- simple1 = b'HOME=/'
- simple2 = b'PATH=/bin:/sbin'
- bootflag = b'BOOTABLE_FLAG=\x80' # from LP: #1775371
- mixed = b'MIXED=' + b'ab\xccde'
- def _val_decoded(self, blob, encoding='utf-8', errors='replace'):
+ null = b"\x00"
+ simple1 = b"HOME=/"
+ simple2 = b"PATH=/bin:/sbin"
+ bootflag = b"BOOTABLE_FLAG=\x80" # from LP: #1775371
+ mixed = b"MIXED=" + b"ab\xccde"
+
+ def _val_decoded(self, blob, encoding="utf-8", errors="replace"):
# return the value portion of key=val decoded.
- return blob.split(b'=', 1)[1].decode(encoding, errors)
+ return blob.split(b"=", 1)[1].decode(encoding, errors)
@mock.patch("cloudinit.util.load_file")
def test_non_utf8_in_environment(self, m_load_file):
"""env may have non utf-8 decodable content."""
content = self.null.join(
- (self.bootflag, self.simple1, self.simple2, self.mixed))
+ (self.bootflag, self.simple1, self.simple2, self.mixed)
+ )
m_load_file.return_value = content
self.assertEqual(
- {'BOOTABLE_FLAG': self._val_decoded(self.bootflag),
- 'HOME': '/', 'PATH': '/bin:/sbin',
- 'MIXED': self._val_decoded(self.mixed)},
- util.get_proc_env(1))
+ {
+ "BOOTABLE_FLAG": self._val_decoded(self.bootflag),
+ "HOME": "/",
+ "PATH": "/bin:/sbin",
+ "MIXED": self._val_decoded(self.mixed),
+ },
+ util.get_proc_env(1),
+ )
self.assertEqual(1, m_load_file.call_count)
@mock.patch("cloudinit.util.load_file")
@@ -838,8 +2246,9 @@ class TestGetProcEnv(helpers.TestCase):
m_load_file.return_value = content
self.assertEqual(
- dict([t.split(b'=') for t in lines]),
- util.get_proc_env(1, encoding=None))
+ dict([t.split(b"=") for t in lines]),
+ util.get_proc_env(1, encoding=None),
+ )
self.assertEqual(1, m_load_file.call_count)
@mock.patch("cloudinit.util.load_file")
@@ -848,8 +2257,8 @@ class TestGetProcEnv(helpers.TestCase):
content = self.null.join((self.simple1, self.simple2))
m_load_file.return_value = content
self.assertEqual(
- {'HOME': '/', 'PATH': '/bin:/sbin'},
- util.get_proc_env(1))
+ {"HOME": "/", "PATH": "/bin:/sbin"}, util.get_proc_env(1)
+ )
self.assertEqual(1, m_load_file.call_count)
@mock.patch("cloudinit.util.load_file")
@@ -867,16 +2276,17 @@ class TestGetProcEnv(helpers.TestCase):
self.assertEqual(my_ppid, util.get_proc_ppid(my_pid))
-class TestKernelVersion():
+class TestKernelVersion:
"""test kernel version function"""
params = [
- ('5.6.19-300.fc32.x86_64', (5, 6)),
- ('4.15.0-101-generic', (4, 15)),
- ('3.10.0-1062.12.1.vz7.131.10', (3, 10)),
- ('4.18.0-144.el8.x86_64', (4, 18))]
+ ("5.6.19-300.fc32.x86_64", (5, 6)),
+ ("4.15.0-101-generic", (4, 15)),
+ ("3.10.0-1062.12.1.vz7.131.10", (3, 10)),
+ ("4.18.0-144.el8.x86_64", (4, 18)),
+ ]
- @mock.patch('os.uname')
+ @mock.patch("os.uname")
@pytest.mark.parametrize("uname_release,expected", params)
def test_kernel_version(self, m_uname, uname_release, expected):
m_uname.return_value.release = uname_release
@@ -884,49 +2294,48 @@ class TestKernelVersion():
class TestFindDevs:
- @mock.patch('cloudinit.subp.subp')
+ @mock.patch("cloudinit.subp.subp")
def test_find_devs_with(self, m_subp):
m_subp.return_value = (
'/dev/sda1: UUID="some-uuid" TYPE="ext4" PARTUUID="some-partid"',
- ''
+ "",
)
devlist = util.find_devs_with()
assert devlist == [
- '/dev/sda1: UUID="some-uuid" TYPE="ext4" PARTUUID="some-partid"']
+ '/dev/sda1: UUID="some-uuid" TYPE="ext4" PARTUUID="some-partid"'
+ ]
devlist = util.find_devs_with("LABEL_FATBOOT=A_LABEL")
assert devlist == [
- '/dev/sda1: UUID="some-uuid" TYPE="ext4" PARTUUID="some-partid"']
+ '/dev/sda1: UUID="some-uuid" TYPE="ext4" PARTUUID="some-partid"'
+ ]
- @mock.patch('cloudinit.subp.subp')
+ @mock.patch("cloudinit.subp.subp")
def test_find_devs_with_openbsd(self, m_subp):
- m_subp.return_value = (
- 'cd0:,sd0:630d98d32b5d3759,sd1:,fd0:', ''
- )
+ m_subp.return_value = ("cd0:,sd0:630d98d32b5d3759,sd1:,fd0:", "")
devlist = util.find_devs_with_openbsd()
- assert devlist == ['/dev/cd0a', '/dev/sd1i']
+ assert devlist == ["/dev/cd0a", "/dev/sd1a", "/dev/sd1i"]
- @mock.patch('cloudinit.subp.subp')
+ @mock.patch("cloudinit.subp.subp")
def test_find_devs_with_openbsd_with_criteria(self, m_subp):
- m_subp.return_value = (
- 'cd0:,sd0:630d98d32b5d3759,sd1:,fd0:', ''
- )
+ m_subp.return_value = ("cd0:,sd0:630d98d32b5d3759,sd1:,fd0:", "")
devlist = util.find_devs_with_openbsd(criteria="TYPE=iso9660")
- assert devlist == ['/dev/cd0a']
+ assert devlist == ["/dev/cd0a", "/dev/sd1a", "/dev/sd1i"]
# lp: #1841466
devlist = util.find_devs_with_openbsd(criteria="LABEL_FATBOOT=A_LABEL")
- assert devlist == ['/dev/cd0a', '/dev/sd1i']
+ assert devlist == ["/dev/cd0a", "/dev/sd1a", "/dev/sd1i"]
@pytest.mark.parametrize(
- 'criteria,expected_devlist', (
- (None, ['/dev/msdosfs/EFISYS', '/dev/iso9660/config-2']),
- ('TYPE=iso9660', ['/dev/iso9660/config-2']),
- ('TYPE=vfat', ['/dev/msdosfs/EFISYS']),
- ('LABEL_FATBOOT=A_LABEL', []), # lp: #1841466
+ "criteria,expected_devlist",
+ (
+ (None, ["/dev/msdosfs/EFISYS", "/dev/iso9660/config-2"]),
+ ("TYPE=iso9660", ["/dev/iso9660/config-2"]),
+ ("TYPE=vfat", ["/dev/msdosfs/EFISYS"]),
+ ("LABEL_FATBOOT=A_LABEL", []), # lp: #1841466
),
)
- @mock.patch('glob.glob')
+ @mock.patch("glob.glob")
def test_find_devs_with_freebsd(self, m_glob, criteria, expected_devlist):
def fake_glob(pattern):
msdos = ["/dev/msdosfs/EFISYS"]
@@ -936,58 +2345,54 @@ class TestFindDevs:
elif pattern == "/dev/iso9660/*":
return iso9660
raise Exception
+
m_glob.side_effect = fake_glob
devlist = util.find_devs_with_freebsd(criteria=criteria)
assert devlist == expected_devlist
@pytest.mark.parametrize(
- 'criteria,expected_devlist', (
- (None, ['/dev/ld0', '/dev/dk0', '/dev/dk1', '/dev/cd0']),
- ('TYPE=iso9660', ['/dev/cd0']),
- ('TYPE=vfat', ["/dev/ld0", "/dev/dk0", "/dev/dk1"]),
- ('LABEL_FATBOOT=A_LABEL', # lp: #1841466
- ['/dev/ld0', '/dev/dk0', '/dev/dk1', '/dev/cd0']),
- )
+ "criteria,expected_devlist",
+ (
+ (None, ["/dev/ld0", "/dev/dk0", "/dev/dk1", "/dev/cd0"]),
+ ("TYPE=iso9660", ["/dev/cd0"]),
+ ("TYPE=vfat", ["/dev/ld0", "/dev/dk0", "/dev/dk1"]),
+ (
+ "LABEL_FATBOOT=A_LABEL", # lp: #1841466
+ ["/dev/ld0", "/dev/dk0", "/dev/dk1", "/dev/cd0"],
+ ),
+ ),
)
@mock.patch("cloudinit.subp.subp")
def test_find_devs_with_netbsd(self, m_subp, criteria, expected_devlist):
side_effect_values = [
("ld0 dk0 dk1 cd0", ""),
(
- (
- "mscdlabel: CDIOREADTOCHEADER: "
- "Inappropriate ioctl for device\n"
- "track (ctl=4) at sector 0\n"
- "disklabel not written\n"
- ),
+ "mscdlabel: CDIOREADTOCHEADER: "
+ "Inappropriate ioctl for device\n"
+ "track (ctl=4) at sector 0\n"
+ "disklabel not written\n",
"",
),
(
- (
- "mscdlabel: CDIOREADTOCHEADER: "
- "Inappropriate ioctl for device\n"
- "track (ctl=4) at sector 0\n"
- "disklabel not written\n"
- ),
+ "mscdlabel: CDIOREADTOCHEADER: "
+ "Inappropriate ioctl for device\n"
+ "track (ctl=4) at sector 0\n"
+ "disklabel not written\n",
"",
),
(
- (
- "mscdlabel: CDIOREADTOCHEADER: "
- "Inappropriate ioctl for device\n"
- "track (ctl=4) at sector 0\n"
- "disklabel not written\n"
- ),
+ "mscdlabel: CDIOREADTOCHEADER: "
+ "Inappropriate ioctl for device\n"
+ "track (ctl=4) at sector 0\n"
+ "disklabel not written\n",
"",
),
(
- (
- "track (ctl=4) at sector 0\n"
- 'ISO filesystem, label "config-2", '
- "creation time: 2020/03/31 17:29\n"
- "adding as 'a'\n"
- ),
+ "track (ctl=4) at sector 0\n"
+ 'ISO filesystem, label "config-2", '
+ "creation time: 2020/03/31 17:29\n"
+ "adding as 'a'\n",
"",
),
]
@@ -995,4 +2400,25 @@ class TestFindDevs:
devlist = util.find_devs_with_netbsd(criteria=criteria)
assert devlist == expected_devlist
+ @pytest.mark.parametrize(
+ "criteria,expected_devlist",
+ (
+ (None, ["/dev/vbd0", "/dev/cd0", "/dev/acd0"]),
+ ("TYPE=iso9660", ["/dev/cd0", "/dev/acd0"]),
+ ("TYPE=vfat", ["/dev/vbd0"]),
+ (
+ "LABEL_FATBOOT=A_LABEL", # lp: #1841466
+ ["/dev/vbd0", "/dev/cd0", "/dev/acd0"],
+ ),
+ ),
+ )
+ @mock.patch("cloudinit.subp.subp")
+ def test_find_devs_with_dragonflybsd(
+ self, m_subp, criteria, expected_devlist
+ ):
+ m_subp.return_value = ("md2 md1 cd0 vbd0 acd0 vn3 vn2 vn1 vn0 md0", "")
+ devlist = util.find_devs_with_dragonflybsd(criteria=criteria)
+ assert devlist == expected_devlist
+
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_version.py b/tests/unittests/test_version.py
new file mode 100644
index 00000000..8ac8aea6
--- /dev/null
+++ b/tests/unittests/test_version.py
@@ -0,0 +1,32 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from unittest import mock
+
+from cloudinit import version
+from tests.unittests.helpers import CiTestCase
+
+
+class TestExportsFeatures(CiTestCase):
+ def test_has_network_config_v1(self):
+ self.assertIn("NETWORK_CONFIG_V1", version.FEATURES)
+
+ def test_has_network_config_v2(self):
+ self.assertIn("NETWORK_CONFIG_V2", version.FEATURES)
+
+
+class TestVersionString(CiTestCase):
+ @mock.patch(
+ "cloudinit.version._PACKAGED_VERSION", "17.2-3-gb05b9972-0ubuntu1"
+ )
+ def test_package_version_respected(self):
+ """If _PACKAGED_VERSION is filled in, then it should be returned."""
+ self.assertEqual("17.2-3-gb05b9972-0ubuntu1", version.version_string())
+
+ @mock.patch("cloudinit.version._PACKAGED_VERSION", "@@PACKAGED_VERSION@@")
+ @mock.patch("cloudinit.version.__VERSION__", "17.2")
+ def test_package_version_skipped(self):
+ """If _PACKAGED_VERSION is not modified, then return __VERSION__."""
+ self.assertEqual("17.2", version.version_string())
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_vmware/test_guestcust_util.py b/tests/unittests/test_vmware/test_guestcust_util.py
deleted file mode 100644
index c8b59d83..00000000
--- a/tests/unittests/test_vmware/test_guestcust_util.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# Copyright (C) 2019 Canonical Ltd.
-# Copyright (C) 2019 VMware INC.
-#
-# Author: Xiaofeng Wang <xiaofengw@vmware.com>
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit import subp
-from cloudinit.sources.helpers.vmware.imc.config import Config
-from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile
-from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
- get_tools_config,
- set_gc_status,
-)
-from cloudinit.tests.helpers import CiTestCase, mock
-
-
-class TestGuestCustUtil(CiTestCase):
- def test_get_tools_config_not_installed(self):
- """
- This test is designed to verify the behavior if vmware-toolbox-cmd
- is not installed.
- """
- with mock.patch.object(subp, 'which', return_value=None):
- self.assertEqual(
- get_tools_config('section', 'key', 'defaultVal'), 'defaultVal')
-
- def test_get_tools_config_internal_exception(self):
- """
- This test is designed to verify the behavior if internal exception
- is raised.
- """
- with mock.patch.object(subp, 'which', return_value='/dummy/path'):
- with mock.patch.object(subp, 'subp',
- return_value=('key=value', b''),
- side_effect=subp.ProcessExecutionError(
- "subp failed", exit_code=99)):
- # verify return value is 'defaultVal', not 'value'.
- self.assertEqual(
- get_tools_config('section', 'key', 'defaultVal'),
- 'defaultVal')
-
- def test_get_tools_config_normal(self):
- """
- This test is designed to verify the value could be parsed from
- key = value of the given [section]
- """
- with mock.patch.object(subp, 'which', return_value='/dummy/path'):
- # value is not blank
- with mock.patch.object(subp, 'subp',
- return_value=('key = value ', b'')):
- self.assertEqual(
- get_tools_config('section', 'key', 'defaultVal'),
- 'value')
- # value is blank
- with mock.patch.object(subp, 'subp',
- return_value=('key = ', b'')):
- self.assertEqual(
- get_tools_config('section', 'key', 'defaultVal'),
- '')
- # value contains =
- with mock.patch.object(subp, 'subp',
- return_value=('key=Bar=Wark', b'')):
- self.assertEqual(
- get_tools_config('section', 'key', 'defaultVal'),
- 'Bar=Wark')
-
- # value contains specific characters
- with mock.patch.object(subp, 'subp',
- return_value=('[a] b.c_d=e-f', b'')):
- self.assertEqual(
- get_tools_config('section', 'key', 'defaultVal'),
- 'e-f')
-
- def test_set_gc_status(self):
- """
- This test is designed to verify the behavior of set_gc_status
- """
- # config is None, return None
- self.assertEqual(set_gc_status(None, 'Successful'), None)
-
- # post gc status is NO, return None
- cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
- conf = Config(cf)
- self.assertEqual(set_gc_status(conf, 'Successful'), None)
-
- # post gc status is YES, subp is called to execute command
- cf._insertKey("MISC|POST-GC-STATUS", "YES")
- conf = Config(cf)
- with mock.patch.object(subp, 'subp',
- return_value=('ok', b'')) as mockobj:
- self.assertEqual(
- set_gc_status(conf, 'Successful'), ('ok', b''))
- mockobj.assert_called_once_with(
- ['vmware-rpctool', 'info-set guestinfo.gc.status Successful'],
- rcs=[0])
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_vmware_config_file.py b/tests/unittests/test_vmware_config_file.py
deleted file mode 100644
index 9c7d25fa..00000000
--- a/tests/unittests/test_vmware_config_file.py
+++ /dev/null
@@ -1,529 +0,0 @@
-# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2016 VMware INC.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-# Pengpeng Sun <pengpengs@vmware.com>
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import logging
-import os
-import sys
-import tempfile
-import textwrap
-
-from cloudinit.sources.DataSourceOVF import get_network_config_from_conf
-from cloudinit.sources.DataSourceOVF import read_vmware_imc
-from cloudinit.sources.helpers.vmware.imc.boot_proto import BootProtoEnum
-from cloudinit.sources.helpers.vmware.imc.config import Config
-from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile
-from cloudinit.sources.helpers.vmware.imc.config_nic import gen_subnet
-from cloudinit.sources.helpers.vmware.imc.config_nic import NicConfigurator
-from cloudinit.tests.helpers import CiTestCase
-
-logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
-logger = logging.getLogger(__name__)
-
-
-class TestVmwareConfigFile(CiTestCase):
-
- def test_utility_methods(self):
- """Tests basic utility methods of ConfigFile class"""
- cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
-
- cf.clear()
-
- self.assertEqual(0, len(cf), "clear size")
-
- cf._insertKey(" PASSWORD|-PASS ", " foo ")
- cf._insertKey("BAR", " ")
-
- self.assertEqual(2, len(cf), "insert size")
- self.assertEqual('foo', cf["PASSWORD|-PASS"], "password")
- self.assertTrue("PASSWORD|-PASS" in cf, "hasPassword")
- self.assertFalse(cf.should_keep_current_value("PASSWORD|-PASS"),
- "keepPassword")
- self.assertFalse(cf.should_remove_current_value("PASSWORD|-PASS"),
- "removePassword")
- self.assertFalse("FOO" in cf, "hasFoo")
- self.assertTrue(cf.should_keep_current_value("FOO"), "keepFoo")
- self.assertFalse(cf.should_remove_current_value("FOO"), "removeFoo")
- self.assertTrue("BAR" in cf, "hasBar")
- self.assertFalse(cf.should_keep_current_value("BAR"), "keepBar")
- self.assertTrue(cf.should_remove_current_value("BAR"), "removeBar")
-
- def test_datasource_instance_id(self):
- """Tests instance id for the DatasourceOVF"""
- cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
-
- instance_id_prefix = 'iid-vmware-'
-
- conf = Config(cf)
-
- (md1, _, _) = read_vmware_imc(conf)
- self.assertIn(instance_id_prefix, md1["instance-id"])
- self.assertEqual(md1["instance-id"], 'iid-vmware-imc')
-
- (md2, _, _) = read_vmware_imc(conf)
- self.assertIn(instance_id_prefix, md2["instance-id"])
- self.assertEqual(md2["instance-id"], 'iid-vmware-imc')
-
- self.assertEqual(md2["instance-id"], md1["instance-id"])
-
- def test_configfile_static_2nics(self):
- """Tests Config class for a configuration with two static NICs."""
- cf = ConfigFile("tests/data/vmware/cust-static-2nic.cfg")
-
- conf = Config(cf)
-
- self.assertEqual('myhost1', conf.host_name, "hostName")
- self.assertEqual('Africa/Abidjan', conf.timezone, "tz")
- self.assertTrue(conf.utc, "utc")
-
- self.assertEqual(['10.20.145.1', '10.20.145.2'],
- conf.name_servers,
- "dns")
- self.assertEqual(['eng.vmware.com', 'proxy.vmware.com'],
- conf.dns_suffixes,
- "suffixes")
-
- nics = conf.nics
- ipv40 = nics[0].staticIpv4
-
- self.assertEqual(2, len(nics), "nics")
- self.assertEqual('NIC1', nics[0].name, "nic0")
- self.assertEqual('00:50:56:a6:8c:08', nics[0].mac, "mac0")
- self.assertEqual(BootProtoEnum.STATIC, nics[0].bootProto, "bootproto0")
- self.assertEqual('10.20.87.154', ipv40[0].ip, "ipv4Addr0")
- self.assertEqual('255.255.252.0', ipv40[0].netmask, "ipv4Mask0")
- self.assertEqual(2, len(ipv40[0].gateways), "ipv4Gw0")
- self.assertEqual('10.20.87.253', ipv40[0].gateways[0], "ipv4Gw0_0")
- self.assertEqual('10.20.87.105', ipv40[0].gateways[1], "ipv4Gw0_1")
-
- self.assertEqual(1, len(nics[0].staticIpv6), "ipv6Cnt0")
- self.assertEqual('fc00:10:20:87::154',
- nics[0].staticIpv6[0].ip,
- "ipv6Addr0")
-
- self.assertEqual('NIC2', nics[1].name, "nic1")
- self.assertTrue(not nics[1].staticIpv6, "ipv61 dhcp")
-
- def test_config_file_dhcp_2nics(self):
- """Tests Config class for a configuration with two DHCP NICs."""
- cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
-
- conf = Config(cf)
- nics = conf.nics
- self.assertEqual(2, len(nics), "nics")
- self.assertEqual('NIC1', nics[0].name, "nic0")
- self.assertEqual('00:50:56:a6:8c:08', nics[0].mac, "mac0")
- self.assertEqual(BootProtoEnum.DHCP, nics[0].bootProto, "bootproto0")
-
- def test_config_password(self):
- cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
-
- cf._insertKey("PASSWORD|-PASS", "test-password")
- cf._insertKey("PASSWORD|RESET", "no")
-
- conf = Config(cf)
- self.assertEqual('test-password', conf.admin_password, "password")
- self.assertFalse(conf.reset_password, "do not reset password")
-
- def test_config_reset_passwd(self):
- cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
-
- cf._insertKey("PASSWORD|-PASS", "test-password")
- cf._insertKey("PASSWORD|RESET", "random")
-
- conf = Config(cf)
- with self.assertRaises(ValueError):
- pw = conf.reset_password
- self.assertIsNone(pw)
-
- cf.clear()
- cf._insertKey("PASSWORD|RESET", "yes")
- self.assertEqual(1, len(cf), "insert size")
-
- conf = Config(cf)
- self.assertTrue(conf.reset_password, "reset password")
-
- def test_get_config_nameservers(self):
- """Tests DNS and nameserver settings in a configuration."""
- cf = ConfigFile("tests/data/vmware/cust-static-2nic.cfg")
-
- config = Config(cf)
-
- network_config = get_network_config_from_conf(config, False)
-
- self.assertEqual(1, network_config.get('version'))
-
- config_types = network_config.get('config')
- name_servers = None
- dns_suffixes = None
-
- for type in config_types:
- if type.get('type') == 'nameserver':
- name_servers = type.get('address')
- dns_suffixes = type.get('search')
- break
-
- self.assertEqual(['10.20.145.1', '10.20.145.2'],
- name_servers,
- "dns")
- self.assertEqual(['eng.vmware.com', 'proxy.vmware.com'],
- dns_suffixes,
- "suffixes")
-
- def test_gen_subnet(self):
- """Tests if gen_subnet properly calculates network subnet from
- IPv4 address and netmask"""
- ip_subnet_list = [['10.20.87.253', '255.255.252.0', '10.20.84.0'],
- ['10.20.92.105', '255.255.252.0', '10.20.92.0'],
- ['192.168.0.10', '255.255.0.0', '192.168.0.0']]
- for entry in ip_subnet_list:
- self.assertEqual(entry[2], gen_subnet(entry[0], entry[1]),
- "Subnet for a specified ip and netmask")
-
- def test_get_config_dns_suffixes(self):
- """Tests if get_network_config_from_conf properly
- generates nameservers and dns settings from a
- specified configuration"""
- cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
-
- config = Config(cf)
-
- network_config = get_network_config_from_conf(config, False)
-
- self.assertEqual(1, network_config.get('version'))
-
- config_types = network_config.get('config')
- name_servers = None
- dns_suffixes = None
-
- for type in config_types:
- if type.get('type') == 'nameserver':
- name_servers = type.get('address')
- dns_suffixes = type.get('search')
- break
-
- self.assertEqual([],
- name_servers,
- "dns")
- self.assertEqual(['eng.vmware.com'],
- dns_suffixes,
- "suffixes")
-
- def test_get_nics_list_dhcp(self):
- """Tests if NicConfigurator properly calculates network subnets
- for a configuration with a list of DHCP NICs"""
- cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
-
- config = Config(cf)
-
- nicConfigurator = NicConfigurator(config.nics, False)
- nics_cfg_list = nicConfigurator.generate()
-
- self.assertEqual(2, len(nics_cfg_list), "number of config elements")
-
- nic1 = {'name': 'NIC1'}
- nic2 = {'name': 'NIC2'}
- for cfg in nics_cfg_list:
- if cfg.get('name') == nic1.get('name'):
- nic1.update(cfg)
- elif cfg.get('name') == nic2.get('name'):
- nic2.update(cfg)
-
- self.assertEqual('physical', nic1.get('type'), 'type of NIC1')
- self.assertEqual('NIC1', nic1.get('name'), 'name of NIC1')
- self.assertEqual('00:50:56:a6:8c:08', nic1.get('mac_address'),
- 'mac address of NIC1')
- subnets = nic1.get('subnets')
- self.assertEqual(1, len(subnets), 'number of subnets for NIC1')
- subnet = subnets[0]
- self.assertEqual('dhcp', subnet.get('type'), 'DHCP type for NIC1')
- self.assertEqual('auto', subnet.get('control'), 'NIC1 Control type')
-
- self.assertEqual('physical', nic2.get('type'), 'type of NIC2')
- self.assertEqual('NIC2', nic2.get('name'), 'name of NIC2')
- self.assertEqual('00:50:56:a6:5a:de', nic2.get('mac_address'),
- 'mac address of NIC2')
- subnets = nic2.get('subnets')
- self.assertEqual(1, len(subnets), 'number of subnets for NIC2')
- subnet = subnets[0]
- self.assertEqual('dhcp', subnet.get('type'), 'DHCP type for NIC2')
- self.assertEqual('auto', subnet.get('control'), 'NIC2 Control type')
-
- def test_get_nics_list_static(self):
- """Tests if NicConfigurator properly calculates network subnets
- for a configuration with 2 static NICs"""
- cf = ConfigFile("tests/data/vmware/cust-static-2nic.cfg")
-
- config = Config(cf)
-
- nicConfigurator = NicConfigurator(config.nics, False)
- nics_cfg_list = nicConfigurator.generate()
-
- self.assertEqual(2, len(nics_cfg_list), "number of elements")
-
- nic1 = {'name': 'NIC1'}
- nic2 = {'name': 'NIC2'}
- route_list = []
- for cfg in nics_cfg_list:
- cfg_type = cfg.get('type')
- if cfg_type == 'physical':
- if cfg.get('name') == nic1.get('name'):
- nic1.update(cfg)
- elif cfg.get('name') == nic2.get('name'):
- nic2.update(cfg)
-
- self.assertEqual('physical', nic1.get('type'), 'type of NIC1')
- self.assertEqual('NIC1', nic1.get('name'), 'name of NIC1')
- self.assertEqual('00:50:56:a6:8c:08', nic1.get('mac_address'),
- 'mac address of NIC1')
-
- subnets = nic1.get('subnets')
- self.assertEqual(2, len(subnets), 'Number of subnets')
-
- static_subnet = []
- static6_subnet = []
-
- for subnet in subnets:
- subnet_type = subnet.get('type')
- if subnet_type == 'static':
- static_subnet.append(subnet)
- elif subnet_type == 'static6':
- static6_subnet.append(subnet)
- else:
- self.assertEqual(True, False, 'Unknown type')
- if 'route' in subnet:
- for route in subnet.get('routes'):
- route_list.append(route)
-
- self.assertEqual(1, len(static_subnet), 'Number of static subnet')
- self.assertEqual(1, len(static6_subnet), 'Number of static6 subnet')
-
- subnet = static_subnet[0]
- self.assertEqual('10.20.87.154', subnet.get('address'),
- 'IPv4 address of static subnet')
- self.assertEqual('255.255.252.0', subnet.get('netmask'),
- 'NetMask of static subnet')
- self.assertEqual('auto', subnet.get('control'),
- 'control for static subnet')
-
- subnet = static6_subnet[0]
- self.assertEqual('fc00:10:20:87::154', subnet.get('address'),
- 'IPv6 address of static subnet')
- self.assertEqual('64', subnet.get('netmask'),
- 'NetMask of static6 subnet')
-
- route_set = set(['10.20.87.253', '10.20.87.105', '192.168.0.10'])
- for route in route_list:
- self.assertEqual(10000, route.get('metric'), 'metric of route')
- gateway = route.get('gateway')
- if gateway in route_set:
- route_set.discard(gateway)
- else:
- self.assertEqual(True, False, 'invalid gateway %s' % (gateway))
-
- self.assertEqual('physical', nic2.get('type'), 'type of NIC2')
- self.assertEqual('NIC2', nic2.get('name'), 'name of NIC2')
- self.assertEqual('00:50:56:a6:ef:7d', nic2.get('mac_address'),
- 'mac address of NIC2')
-
- subnets = nic2.get('subnets')
- self.assertEqual(1, len(subnets), 'Number of subnets for NIC2')
-
- subnet = subnets[0]
- self.assertEqual('static', subnet.get('type'), 'Subnet type')
- self.assertEqual('192.168.6.102', subnet.get('address'),
- 'Subnet address')
- self.assertEqual('255.255.0.0', subnet.get('netmask'),
- 'Subnet netmask')
-
- def test_custom_script(self):
- cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
- conf = Config(cf)
- self.assertIsNone(conf.custom_script_name)
- cf._insertKey("CUSTOM-SCRIPT|SCRIPT-NAME", "test-script")
- conf = Config(cf)
- self.assertEqual("test-script", conf.custom_script_name)
-
- def test_post_gc_status(self):
- cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
- conf = Config(cf)
- self.assertFalse(conf.post_gc_status)
- cf._insertKey("MISC|POST-GC-STATUS", "YES")
- conf = Config(cf)
- self.assertTrue(conf.post_gc_status)
-
- def test_no_default_run_post_script(self):
- cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
- conf = Config(cf)
- self.assertFalse(conf.default_run_post_script)
- cf._insertKey("MISC|DEFAULT-RUN-POST-CUST-SCRIPT", "NO")
- conf = Config(cf)
- self.assertFalse(conf.default_run_post_script)
-
- def test_yes_default_run_post_script(self):
- cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
- cf._insertKey("MISC|DEFAULT-RUN-POST-CUST-SCRIPT", "yes")
- conf = Config(cf)
- self.assertTrue(conf.default_run_post_script)
-
-
-class TestVmwareNetConfig(CiTestCase):
- """Test conversion of vmware config to cloud-init config."""
-
- maxDiff = None
-
- def _get_NicConfigurator(self, text):
- fp = None
- try:
- with tempfile.NamedTemporaryFile(mode="w", dir=self.tmp_dir(),
- delete=False) as fp:
- fp.write(text)
- fp.close()
- cfg = Config(ConfigFile(fp.name))
- return NicConfigurator(cfg.nics, use_system_devices=False)
- finally:
- if fp:
- os.unlink(fp.name)
-
- def test_non_primary_nic_without_gateway(self):
- """A non primary nic set is not required to have a gateway."""
- config = textwrap.dedent("""\
- [NETWORK]
- NETWORKING = yes
- BOOTPROTO = dhcp
- HOSTNAME = myhost1
- DOMAINNAME = eng.vmware.com
-
- [NIC-CONFIG]
- NICS = NIC1
-
- [NIC1]
- MACADDR = 00:50:56:a6:8c:08
- ONBOOT = yes
- IPv4_MODE = BACKWARDS_COMPATIBLE
- BOOTPROTO = static
- IPADDR = 10.20.87.154
- NETMASK = 255.255.252.0
- """)
- nc = self._get_NicConfigurator(config)
- self.assertEqual(
- [{'type': 'physical', 'name': 'NIC1',
- 'mac_address': '00:50:56:a6:8c:08',
- 'subnets': [
- {'control': 'auto', 'type': 'static',
- 'address': '10.20.87.154', 'netmask': '255.255.252.0'}]}],
- nc.generate())
-
- def test_non_primary_nic_with_gateway(self):
- """A non primary nic set can have a gateway."""
- config = textwrap.dedent("""\
- [NETWORK]
- NETWORKING = yes
- BOOTPROTO = dhcp
- HOSTNAME = myhost1
- DOMAINNAME = eng.vmware.com
-
- [NIC-CONFIG]
- NICS = NIC1
-
- [NIC1]
- MACADDR = 00:50:56:a6:8c:08
- ONBOOT = yes
- IPv4_MODE = BACKWARDS_COMPATIBLE
- BOOTPROTO = static
- IPADDR = 10.20.87.154
- NETMASK = 255.255.252.0
- GATEWAY = 10.20.87.253
- """)
- nc = self._get_NicConfigurator(config)
- self.assertEqual(
- [{'type': 'physical', 'name': 'NIC1',
- 'mac_address': '00:50:56:a6:8c:08',
- 'subnets': [
- {'control': 'auto', 'type': 'static',
- 'address': '10.20.87.154', 'netmask': '255.255.252.0',
- 'routes':
- [{'type': 'route', 'destination': '10.20.84.0/22',
- 'gateway': '10.20.87.253', 'metric': 10000}]}]}],
- nc.generate())
-
- def test_cust_non_primary_nic_with_gateway_(self):
- """A customer non primary nic set can have a gateway."""
- config = textwrap.dedent("""\
- [NETWORK]
- NETWORKING = yes
- BOOTPROTO = dhcp
- HOSTNAME = static-debug-vm
- DOMAINNAME = cluster.local
-
- [NIC-CONFIG]
- NICS = NIC1
-
- [NIC1]
- MACADDR = 00:50:56:ac:d1:8a
- ONBOOT = yes
- IPv4_MODE = BACKWARDS_COMPATIBLE
- BOOTPROTO = static
- IPADDR = 100.115.223.75
- NETMASK = 255.255.255.0
- GATEWAY = 100.115.223.254
-
-
- [DNS]
- DNSFROMDHCP=no
-
- NAMESERVER|1 = 8.8.8.8
-
- [DATETIME]
- UTC = yes
- """)
- nc = self._get_NicConfigurator(config)
- self.assertEqual(
- [{'type': 'physical', 'name': 'NIC1',
- 'mac_address': '00:50:56:ac:d1:8a',
- 'subnets': [
- {'control': 'auto', 'type': 'static',
- 'address': '100.115.223.75', 'netmask': '255.255.255.0',
- 'routes':
- [{'type': 'route', 'destination': '100.115.223.0/24',
- 'gateway': '100.115.223.254', 'metric': 10000}]}]}],
- nc.generate())
-
- def test_a_primary_nic_with_gateway(self):
- """A primary nic set can have a gateway."""
- config = textwrap.dedent("""\
- [NETWORK]
- NETWORKING = yes
- BOOTPROTO = dhcp
- HOSTNAME = myhost1
- DOMAINNAME = eng.vmware.com
-
- [NIC-CONFIG]
- NICS = NIC1
-
- [NIC1]
- MACADDR = 00:50:56:a6:8c:08
- ONBOOT = yes
- IPv4_MODE = BACKWARDS_COMPATIBLE
- BOOTPROTO = static
- IPADDR = 10.20.87.154
- NETMASK = 255.255.252.0
- PRIMARY = true
- GATEWAY = 10.20.87.253
- """)
- nc = self._get_NicConfigurator(config)
- self.assertEqual(
- [{'type': 'physical', 'name': 'NIC1',
- 'mac_address': '00:50:56:a6:8c:08',
- 'subnets': [
- {'control': 'auto', 'type': 'static',
- 'address': '10.20.87.154', 'netmask': '255.255.252.0',
- 'gateway': '10.20.87.253'}]}],
- nc.generate())
-
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/util.py b/tests/unittests/util.py
new file mode 100644
index 00000000..79a6e1d0
--- /dev/null
+++ b/tests/unittests/util.py
@@ -0,0 +1,145 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+from cloudinit import cloud, distros, helpers
+from cloudinit.sources.DataSourceNone import DataSourceNone
+
+
+def get_cloud(distro=None, paths=None, sys_cfg=None, metadata=None):
+ """Obtain a "cloud" that can be used for testing.
+
+ Modules take a 'cloud' parameter to call into things that are
+ datasource/distro specific. In most cases, the specifics of this cloud
+ implementation aren't needed to test the module, so provide a fake
+ datasource/distro with stubbed calls to methods that may attempt to
+ read/write files or shell out. If a specific distro is needed, it can
+ be passed in as the distro parameter.
+ """
+ paths = paths or helpers.Paths({})
+ sys_cfg = sys_cfg or {}
+ cls = distros.fetch(distro) if distro else MockDistro
+ mydist = cls(distro, sys_cfg, paths)
+ myds = DataSourceTesting(sys_cfg, mydist, paths)
+ if metadata:
+ myds.metadata.update(metadata)
+ if paths:
+ paths.datasource = myds
+ return cloud.Cloud(myds, paths, sys_cfg, mydist, None)
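+
+
+# Added illustration (not part of the original patch): a minimal sketch of
+# how a module test might use get_cloud(); the metadata value below is only
+# an assumed example.
+def _example_get_cloud_usage():
+    mycloud = get_cloud(metadata={"instance-id": "iid-example"})
+    # The returned cloud wraps the DataSourceTesting and MockDistro stubs
+    # defined later in this module, so a test can read metadata and call
+    # distro methods without touching the host system.
+    assert mycloud.datasource.metadata["instance-id"] == "iid-example"
+    assert mycloud.distro.uses_systemd() is True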
+
+
+def abstract_to_concrete(abclass):
+ """Takes an abstract class and returns a concrete version of it."""
+
+ class concreteCls(abclass):
+ pass
+
+ concreteCls.__abstractmethods__ = frozenset()
+ return type("DummyConcrete" + abclass.__name__, (concreteCls,), {})
+
+
+class DataSourceTesting(DataSourceNone):
+ def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):
+ return "hostname"
+
+ def persist_instance_data(self):
+ return True
+
+ @property
+ def fallback_interface(self):
+ return None
+
+ @property
+ def cloud_name(self):
+ return "testing"
+
+
+class MockDistro(distros.Distro):
+ # MockDistro is here to test base Distro class implementations
+ def __init__(self, name="testingdistro", cfg=None, paths=None):
+ if not cfg:
+ cfg = {}
+ if not paths:
+ paths = {}
+ super(MockDistro, self).__init__(name, cfg, paths)
+
+ def install_packages(self, pkglist):
+ pass
+
+ def set_hostname(self, hostname, fqdn=None):
+ pass
+
+ def uses_systemd(self):
+ return True
+
+ def get_primary_arch(self):
+ return "i386"
+
+ def get_package_mirror_info(self, arch=None, data_source=None):
+ pass
+
+ def apply_network(self, settings, bring_up=True):
+ return False
+
+ def generate_fallback_config(self):
+ return {}
+
+ def apply_network_config(self, netconfig, bring_up=False) -> bool:
+ return False
+
+ def apply_network_config_names(self, netconfig):
+ pass
+
+ def apply_locale(self, locale, out_fn=None):
+ pass
+
+ def set_timezone(self, tz):
+ pass
+
+ def _read_hostname(self, filename, default=None):
+ raise NotImplementedError()
+
+ def _write_hostname(self, hostname, filename):
+ raise NotImplementedError()
+
+ def _read_system_hostname(self):
+ raise NotImplementedError()
+
+ def update_hostname(self, hostname, fqdn, prev_hostname_fn):
+ pass
+
+ def update_etc_hosts(self, hostname, fqdn):
+ pass
+
+ def add_user(self, name, **kwargs):
+ pass
+
+ def add_snap_user(self, name, **kwargs):
+ return "snap_user"
+
+ def create_user(self, name, **kwargs):
+ return True
+
+ def lock_passwd(self, name):
+ pass
+
+ def expire_passwd(self, user):
+ pass
+
+ def set_passwd(self, user, passwd, hashed=False):
+ return True
+
+ def ensure_sudo_dir(self, path, sudo_base="/etc/sudoers"):
+ pass
+
+ def write_sudo_rules(self, user, rules, sudo_file=None):
+ pass
+
+ def create_group(self, name, members=None):
+ pass
+
+ def shutdown_command(self, *, mode, delay, message):
+ pass
+
+ def package_command(self, command, args=None, pkgs=None):
+ pass
+
+ def update_package_sources(self):
+ return (True, "yay")